summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rwxr-xr-xbin/nova-manage51
-rw-r--r--doc/source/devref/filter_scheduler.rst97
-rw-r--r--etc/nova/nova.conf.sample2
-rw-r--r--etc/nova/policy.json5
-rw-r--r--nova/api/ec2/cloud.py6
-rw-r--r--nova/api/ec2/ec2utils.py6
-rw-r--r--nova/api/metadata/base.py51
-rw-r--r--nova/api/openstack/compute/contrib/disk_config.py3
-rw-r--r--nova/api/openstack/compute/contrib/keypairs.py9
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py78
-rw-r--r--nova/api/openstack/compute/contrib/scheduler_hints.py5
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py8
-rw-r--r--nova/api/openstack/compute/limits.py15
-rw-r--r--nova/block_device.py50
-rw-r--r--nova/compute/api.py192
-rw-r--r--nova/compute/manager.py62
-rw-r--r--nova/compute/rpcapi.py8
-rw-r--r--nova/compute/utils.py52
-rw-r--r--nova/console/api.py8
-rw-r--r--nova/context.py5
-rw-r--r--nova/crypto.py1
-rw-r--r--nova/db/api.py114
-rw-r--r--nova/db/sqlalchemy/api.py266
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/082_essex.py1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py3
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py3
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py2
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py2
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py98
-rw-r--r--nova/db/sqlalchemy/models.py15
-rw-r--r--nova/exception.py9
-rw-r--r--nova/network/linux_net.py104
-rw-r--r--nova/network/nova_ipam_lib.py1
-rw-r--r--nova/notifications.py4
-rw-r--r--nova/openstack/common/cfg.py85
-rw-r--r--nova/openstack/common/notifier/api.py1
-rw-r--r--nova/openstack/common/plugin/pluginmanager.py2
-rw-r--r--nova/openstack/common/rpc/common.py2
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py5
-rw-r--r--nova/openstack/common/timeutils.py18
-rw-r--r--nova/policy.py20
-rw-r--r--nova/quota.py219
-rw-r--r--nova/scheduler/chance.py26
-rw-r--r--nova/scheduler/driver.py6
-rw-r--r--nova/scheduler/filter_scheduler.py116
-rw-r--r--nova/scheduler/filters/aggregate_instance_extra_specs.py2
-rw-r--r--nova/scheduler/filters/all_hosts_filter.py2
-rw-r--r--nova/scheduler/filters/compute_capabilities_filter.py43
-rw-r--r--nova/scheduler/filters/compute_filter.py57
-rw-r--r--nova/scheduler/filters/extra_specs_ops.py63
-rw-r--r--nova/scheduler/host_manager.py2
-rw-r--r--nova/scheduler/manager.py14
-rw-r--r--nova/scheduler/rpcapi.py9
-rw-r--r--nova/test.py2
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py1
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py1
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py3
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_keypairs.py37
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py17
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py27
-rw-r--r--nova/tests/compute/test_compute.py76
-rw-r--r--nova/tests/compute/test_compute_utils.py94
-rw-r--r--nova/tests/compute/test_rpcapi.py4
-rw-r--r--nova/tests/console/test_console.py70
-rw-r--r--nova/tests/fake_flags.py1
-rw-r--r--nova/tests/hyperv/__init__.py0
-rw-r--r--nova/tests/hyperv/basetestcase.py96
-rw-r--r--nova/tests/hyperv/db_fakes.py166
-rw-r--r--nova/tests/hyperv/hypervutils.py245
-rw-r--r--nova/tests/hyperv/mockproxy.py234
-rw-r--r--nova/tests/hyperv/stubs/README.rst2
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gzbin0 -> 670 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gzbin0 -> 2768 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gzbin0 -> 257 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gzbin0 -> 660 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gzbin0 -> 702 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gzbin0 -> 571 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gzbin0 -> 277 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gzbin0 -> 652 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gzbin0 -> 23220 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gzbin0 -> 28631 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gzbin0 -> 385 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gzbin0 -> 260 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gzbin0 -> 578 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gzbin0 -> 20274 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gzbin0 -> 725 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gzbin0 -> 426 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gzbin0 -> 257 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gzbin0 -> 660 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gzbin0 -> 31833 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gzbin0 -> 726 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gzbin0 -> 250 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gzbin0 -> 621 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gzbin0 -> 744 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gzbin0 -> 267 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gzbin0 -> 640 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gzbin0 -> 25238 bytes
-rw-r--r--nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gzbin0 -> 29404 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gzbin0 -> 722 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gzbin0 -> 289 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gzbin0 -> 2797 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gzbin0 -> 276 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gzbin0 -> 674 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gzbin0 -> 755 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gzbin0 -> 320 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gzbin0 -> 591 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gzbin0 -> 290 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gzbin0 -> 658 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gzbin0 -> 22780 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gzbin0 -> 28844 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gzbin0 -> 292 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gzbin0 -> 2800 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gzbin0 -> 275 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gzbin0 -> 592 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gzbin0 -> 19845 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gzbin0 -> 748 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gzbin0 -> 283 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gzbin0 -> 253 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gzbin0 -> 627 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gzbin0 -> 24040 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gzbin0 -> 723 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gzbin0 -> 289 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gzbin0 -> 2798 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gzbin0 -> 275 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gzbin0 -> 671 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gzbin0 -> 29537 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gzbin0 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gzbin0 -> 284 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gzbin0 -> 254 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gzbin0 -> 626 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gzbin0 -> 23400 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gzbin0 -> 277 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gzbin0 -> 7893 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gzbin0 -> 290 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gzbin0 -> 1300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gzbin0 -> 603 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gzbin0 -> 290 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gzbin0 -> 260 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gzbin0 -> 631 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gzbin0 -> 621 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gzbin0 -> 310 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gzbin0 -> 280 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gzbin0 -> 649 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gzbin0 -> 23876 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gzbin0 -> 26172 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gzbin0 -> 728 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gzbin0 -> 296 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gzbin0 -> 266 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gzbin0 -> 638 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gzbin0 -> 23490 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gzbin0 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gzbin0 -> 281 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gzbin0 -> 251 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gzbin0 -> 624 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gzbin0 -> 23350 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gzbin0 -> 740 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gzbin0 -> 305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gzbin0 -> 275 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gzbin0 -> 646 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gzbin0 -> 23323 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gzbin0 -> 719 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gzbin0 -> 285 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gzbin0 -> 255 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gzbin0 -> 625 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gzbin0 -> 23258 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gzbin0 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gzbin0 -> 300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gzbin0 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gzbin0 -> 640 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gzbin0 -> 23305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gzbin0 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gzbin0 -> 284 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gzbin0 -> 254 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gzbin0 -> 626 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gzbin0 -> 23962 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gzbin0 -> 536 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gzbin0 -> 304 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gzbin0 -> 335 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gzbin0 -> 1382 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gzbin0 -> 307 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gzbin0 -> 337 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gzbin0 -> 849 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gzbin0 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gzbin0 -> 282 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gzbin0 -> 252 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gzbin0 -> 623 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gzbin0 -> 23931 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gzbin0 -> 733 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gzbin0 -> 298 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gzbin0 -> 268 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gzbin0 -> 640 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gzbin0 -> 23341 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gzbin0 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gzbin0 -> 282 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gzbin0 -> 252 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gzbin0 -> 623 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gzbin0 -> 24291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gzbin0 -> 1012 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gzbin0 -> 416 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gzbin0 -> 254 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gzbin0 -> 667 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gzbin0 -> 1033 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gzbin0 -> 437 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gzbin0 -> 274 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gzbin0 -> 688 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gzbin0 -> 24794 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gzbin0 -> 24505 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gzbin0 -> 724 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gzbin0 -> 291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gzbin0 -> 261 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gzbin0 -> 631 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gzbin0 -> 24716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gzbin0 -> 607 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gzbin0 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gzbin0 -> 264 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gzbin0 -> 635 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gzbin0 -> 24420 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gzbin0 -> 737 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gzbin0 -> 302 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gzbin0 -> 271 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gzbin0 -> 558 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gzbin0 -> 17307 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gzbin0 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gzbin0 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gzbin0 -> 271 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gzbin0 -> 643 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gzbin0 -> 24133 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gzbin0 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gzbin0 -> 283 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gzbin0 -> 253 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gzbin0 -> 623 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gzbin0 -> 23864 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gzbin0 -> 735 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gzbin0 -> 299 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gzbin0 -> 269 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gzbin0 -> 640 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gzbin0 -> 23690 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gzbin0 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gzbin0 -> 283 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gzbin0 -> 253 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gzbin0 -> 626 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gzbin0 -> 24099 bytes
-rw-r--r--nova/tests/network/test_api.py1
-rw-r--r--nova/tests/policy.json4
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py8
-rw-r--r--nova/tests/scheduler/test_host_filters.py535
-rw-r--r--nova/tests/scheduler/test_host_manager.py1
-rw-r--r--nova/tests/scheduler/test_rpcapi.py14
-rw-r--r--nova/tests/scheduler/test_scheduler.py1
-rw-r--r--nova/tests/test_configdrive2.py1
-rw-r--r--nova/tests/test_context.py6
-rw-r--r--nova/tests/test_hypervapi.py463
-rw-r--r--nova/tests/test_iptables_network.py60
-rw-r--r--nova/tests/test_libvirt.py76
-rw-r--r--nova/tests/test_metadata.py3
-rw-r--r--nova/tests/test_misc.py80
-rw-r--r--nova/tests/test_plugin_api_extensions.py2
-rw-r--r--nova/tests/test_policy.py5
-rw-r--r--nova/tests/test_quota.py607
-rw-r--r--nova/tests/test_service.py1
-rw-r--r--nova/tests/test_storwize_svc.py1
-rw-r--r--nova/tests/test_utils.py3
-rw-r--r--nova/tests/test_volume.py25
-rw-r--r--nova/tests/test_xenapi.py27
-rw-r--r--nova/tests/test_xensm.py4
-rw-r--r--nova/tests/xenapi/stubs.py10
-rw-r--r--nova/utils.py25
-rw-r--r--nova/virt/configdrive.py3
-rw-r--r--nova/virt/driver.py7
-rw-r--r--nova/virt/fake.py7
-rw-r--r--nova/virt/hyperv/README.rst44
-rw-r--r--nova/virt/hyperv/__init__.py0
-rw-r--r--nova/virt/hyperv/baseops.py61
-rw-r--r--nova/virt/hyperv/constants.py54
-rw-r--r--nova/virt/hyperv/driver.py227
-rw-r--r--nova/virt/hyperv/ioutils.py26
-rw-r--r--nova/virt/hyperv/livemigrationops.py162
-rw-r--r--nova/virt/hyperv/snapshotops.py187
-rw-r--r--nova/virt/hyperv/vmops.py650
-rw-r--r--nova/virt/hyperv/vmutils.py146
-rw-r--r--nova/virt/hyperv/volumeops.py297
-rw-r--r--nova/virt/hyperv/volumeutils.py122
-rw-r--r--nova/virt/libvirt/config.py8
-rw-r--r--nova/virt/libvirt/driver.py68
-rw-r--r--nova/virt/libvirt/utils.py1
-rw-r--r--nova/virt/powervm/driver.py3
-rw-r--r--nova/virt/vmwareapi/driver.py3
-rw-r--r--nova/virt/xenapi/driver.py3
-rw-r--r--nova/virt/xenapi/vmops.py23
-rw-r--r--nova/volume/netapp.py1
-rw-r--r--nova/volume/storwize_svc.py1
-rwxr-xr-xrun_tests.sh6
-rwxr-xr-xtools/hacking.py9
-rw-r--r--tox.ini4
298 files changed, 5045 insertions, 2148 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 7005c7fcf..9a09a8b5a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -224,10 +224,6 @@ def _db_error(caught_exception):
class ProjectCommands(object):
"""Class for managing projects."""
- @args('--project', dest="project_id", metavar='<Project name>',
- help='Project name')
- @args('--key', dest="key", metavar='<key>', help='Key')
- @args('--value', dest="value", metavar='<value>', help='Value')
def quota(self, project_id, key=None, value=None):
"""Set or display quotas for project"""
ctxt = context.get_admin_context()
@@ -260,52 +256,6 @@ class ProjectCommands(object):
AccountCommands = ProjectCommands
-class QuotaCommands(object):
- """Class for managing quotas."""
-
- @args('--project', dest="project_id", metavar='<Project name>',
- help='Project name')
- @args('--key', dest="key", metavar='<key>', help='Key')
- @args('--value', dest="value", metavar='<value>', help='Value')
- def project(self, project_id, key=None, value=None):
- """Set or display quotas for project"""
- ctxt = context.get_admin_context()
- if key:
- if value.lower() == 'unlimited':
- value = None
- try:
- db.quota_update(ctxt, project_id, key, value)
- except exception.ProjectQuotaNotFound:
- db.quota_create(ctxt, project_id, key, value)
- project_quota = QUOTAS.get_project_quotas(ctxt, project_id)
- for key, value in project_quota.iteritems():
- if value['limit'] < 0 or value['limit'] is None:
- value['limit'] = 'unlimited'
- print '%s: %s' % (key, value['limit'])
-
- @args('--user', dest="user_id", metavar='<User name>',
- help='User name')
- @args('--project', dest="project_id", metavar='<Project name>',
- help='Project name')
- @args('--key', dest="key", metavar='<key>', help='Key')
- @args('--value', dest="value", metavar='<value>', help='Value')
- def user(self, user_id, project_id, key=None, value=None):
- """Set or display quotas for user"""
- ctxt = context.get_admin_context()
- if key:
- if value.lower() == 'unlimited':
- value = None
- try:
- db.quota_update_for_user(ctxt, user_id, project_id, key, value)
- except exception.UserQuotaNotFound:
- db.quota_create_for_user(ctxt, user_id, project_id, key, value)
- user_quota = QUOTAS.get_user_quotas(ctxt, user_id, project_id)
- for key, value in user_quota.iteritems():
- if value['limit'] < 0 or value['limit'] is None:
- value['limit'] = 'unlimited'
- print '%s: %s' % (key, value['limit'])
-
-
class FixedIpCommands(object):
"""Class for managing fixed ip."""
@@ -1364,7 +1314,6 @@ CATEGORIES = [
('logs', GetLogCommands),
('network', NetworkCommands),
('project', ProjectCommands),
- ('quota', QuotaCommands),
('service', ServiceCommands),
('shell', ShellCommands),
('sm', StorageManagerCommands),
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 91cf0e5a0..d6ceb08ef 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -26,26 +26,35 @@ filtering`.
There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |AllHostsFilter| - frankly speaking, this filter does no operation. It
- returns all the available hosts after its work.
-* |AvailabilityZoneFilter| - filters hosts by availability zone. It returns
- hosts with the same availability zone as the requested instance has in its
- properties.
-* |ComputeFilter| - checks that the capabilities provided by the compute
- service satisfy the extra specifications, associated with the instance type.
- It returns a list of hosts that can create instance type.
-* |CoreFilter| - filters based on CPU core utilization. It will approve host if
- it has sufficient number of CPU cores.
+ passes all the available hosts.
+* |ArchFilter| - filters hosts based on architecture. It passes hosts
+ that can support the architecture specified in the instance properties.
+* |AvailabilityZoneFilter| - filters hosts by availability zone. It passes
+  hosts matching the availability zone specified in the instance properties.
+* |ComputeCapabilityFilter| - checks that the capabilities provided by the
+ host compute service satisfy any extra specifications associated with the
+ instance type. It passes hosts that can create the specified instance type.
+* |ComputeFilter| - passes all hosts that are operational and enabled.
+* |CoreFilter| - filters based on CPU core utilization. It passes hosts with
+ sufficient number of CPU cores.
* |IsolatedHostsFilter| - filter based on "image_isolated" and "host_isolated"
flags.
* |JsonFilter| - allows simple JSON-based grammar for selecting hosts.
-* |RamFilter| - filters hosts by their RAM. So, it returns only the hosts with
- enough available RAM.
+* |RamFilter| - filters hosts by their RAM. Only hosts with sufficient RAM
+ to host the instance are passed.
* |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within
the same IP block.
* |DifferentHostFilter| - allows to put the instance on a different host from a
set of instances.
* |SameHostFilter| - puts the instance on the same host as another instance in
  a set of instances.
+* |RetryFilter| - filters hosts that have been attempted for scheduling.
+ Only passes hosts that have not been previously attempted.
+* |TrustedFilter| - filters hosts based on their trust. Only passes hosts
+  that meet the trust requirements specified in the instance properties.
+* |TypeAffinityFilter| - Only passes hosts that are not already running an
+ instance of the requested type.
+* |AggregateTypeAffinityFilter| - limits instance_type by aggregate.
Now we can focus on these standard filter classes in details. I will pass the
simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter| are,
@@ -77,15 +86,19 @@ scheduler with availability zones support and can configure availability zones
on each compute host. This classes method `host_passes` returns `True` if
availability zone mentioned in request is the same on the current compute host.
-|ComputeFilter| checks if host can create `instance_type`. Let's note that
-instance types describe the compute, memory and storage capacity of nova
-compute nodes, it is the list of characteristics such as number of vCPUs,
-amount RAM and so on. So |ComputeFilter| looks at hosts' capabilities (host
-without requested specifications can't be chosen for the creating of the
-instance), checks if the hosts service is up based on last heartbeat. Finally,
-this Scheduler can verify if host satisfies some `extra specifications`
-associated with the instance type (of course if there are no such extra
-specifications, every host suits them).
+The |ArchFilter| filters hosts based on the architecture specified in the
+instance properties. E.g., an instance might require a host that supports
+the arm architecture. The |ArchFilter| will only pass hosts that can
+support the architecture requested by the instance.
+
+|ComputeCapabilitiesFilter| checks if the host satisfies any 'extra specs'
+specified on the instance type. The 'extra specs' can contain key/value pairs,
+and the |ComputeCapabilitiesFilter| will only pass hosts whose capabilities
+satisfy the requested specifications. All hosts are passed if no 'extra specs'
+are specified.
+
+|ComputeFilter| is quite simple and passes any host whose compute service is
+enabled and operational.
Now we are going to |IsolatedHostsFilter|. There can be some special hosts
reserved for specific images. These hosts are called **isolated**. So the
@@ -125,22 +138,33 @@ Many filters use data from `scheduler_hints`, that is defined in the moment of
creation of the new server for the user. The only exeption for this rule is
|JsonFilter|, that takes data in some strange difficult to understand way.
+The |RetryFilter| filters hosts that have already been attempted for scheduling.
+It only passes hosts that have not been previously attempted.
+
+The |TrustedFilter| filters hosts based on their trust. Only passes hosts
+that match the trust requested in the `extra_specs' for the flavor. The
+`extra_specs' will contain a key/value pair where the key is `trust'. The
+value of this pair (`trusted'/`untrusted') must match the integrity of a
+host (obtained from the Attestation service) before it is passed by the
+|TrustedFilter|.
+
To use filters you specify next two settings:
-* `scheduler_available_filters` - points available filters.
-* `scheduler_default_filters` - points filters to be used by default from the
- list of available ones.
+* `scheduler_available_filters` - Defines filter classes made available to the
+ scheduler. This setting can be used multiple times.
+* `scheduler_default_filters` - Of the available filters, defines those that
+ the scheduler uses by default.
-Host Manager sets up these flags in `nova.conf` by default on the next values:
+The default values for these settings in nova.conf are:
::
--scheduler_available_filters=nova.scheduler.filters.standard_filters
- --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter
+ --scheduler_default_filters=RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter
-These two lines mean, that all the filters in the `nova.scheduler.filters`
-would be available, and the default ones would be |RamFilter|, |ComputeFilter|
-and |AvailabilityZoneFilter|.
+With this configuration, all filters in `nova.scheduler.filters`
+would be available, and by default the |RamFilter|, |ComputeFilter|,
+|AvailabilityZoneFilter|, and |ComputeCapabilitiesFilter| would be used.
If you want to create **your own filter** you just need to inherit from
|BaseHostFilter| and implement one method:
@@ -148,7 +172,8 @@ If you want to create **your own filter** you just need to inherit from
takes `host_state` (describes host) and `filter_properties` dictionary as the
parameters.
-So in the end file nova.conf should contain lines like these:
+As an example, nova.conf could contain the following scheduler-related
+settings:
::
@@ -157,10 +182,10 @@ So in the end file nova.conf should contain lines like these:
--scheduler_available_filters=myfilter.MyFilter
--scheduler_default_filters=RamFilter,ComputeFilter,MyFilter
-As you see, flag `scheduler_driver` is set up for the `FilterSchedule`,
-available filters can be specified more than once and description of the
-default filters should not contain full paths with class names you need, only
-class names.
+With these settings, nova will use the `FilterScheduler` for the scheduler
+driver. The standard nova filters and MyFilter are available to the
+FilterScheduler. The RamFilter, ComputeFilter, and MyFilter are used by
+default when no filters are specified in the request.
Costs and weights
-----------------
@@ -253,8 +278,10 @@ P.S.: you can find more examples of using Filter Scheduler and standard filters
in :mod:`nova.tests.scheduler`.
.. |AllHostsFilter| replace:: :class:`AllHostsFilter <nova.scheduler.filters.all_hosts_filter.AllHostsFilter>`
+.. |ArchFilter| replace:: :class:`ArchFilter <nova.scheduler.filters.arch_filter.ArchFilter>`
.. |AvailabilityZoneFilter| replace:: :class:`AvailabilityZoneFilter <nova.scheduler.filters.availability_zone_filter.AvailabilityZoneFilter>`
.. |BaseHostFilter| replace:: :class:`BaseHostFilter <nova.scheduler.filters.BaseHostFilter>`
+.. |ComputeCapabilitiesFilter| replace:: :class:`ComputeCapabilitiesFilter <nova.scheduler.filters.compute_capabilities_filter.ComputeCapabilitiesFilter>`
.. |ComputeFilter| replace:: :class:`ComputeFilter <nova.scheduler.filters.compute_filter.ComputeFilter>`
.. |CoreFilter| replace:: :class:`CoreFilter <nova.scheduler.filters.core_filter.CoreFilter>`
.. |IsolatedHostsFilter| replace:: :class:`IsolatedHostsFilter <nova.scheduler.filters.isolated_hosts_filter>`
@@ -263,3 +290,7 @@ in :mod:`nova.tests.scheduler`.
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter <nova.scheduler.filters.affinity_filter.DifferentHostFilter>`
.. |SameHostFilter| replace:: :class:`SameHostFilter <nova.scheduler.filters.affinity_filter.SameHostFilter>`
+.. |RetryFilter| replace:: :class:`RetryFilter <nova.scheduler.filters.retry_filter.RetryFilter>`
+.. |TrustedFilter| replace:: :class:`TrustedFilter <nova.scheduler.filters.trusted_filter.TrustedFilter>`
+.. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter <nova.scheduler.filters.type_filter.TypeAffinityFilter>`
+.. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter <nova.scheduler.filters.type_filter.AggregateTypeAffinityFilter>`
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 78069f792..5fba9a816 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1211,7 +1211,7 @@
#### "nova.scheduler.filters.standard_filters" maps to all
#### filters included with nova.
-# scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter
+# scheduler_default_filters=AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter
#### (ListOpt) Which filter class names to use for filtering hosts when not
#### specified in the request.
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 72390c75e..02bf503cd 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -1,6 +1,6 @@
{
+ "admin": [["role:admin"]],
"admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
- "admin_or_projectadmin": [["role:projectadmin"], ["role:admin"]],
"default": [["rule:admin_or_owner"]],
@@ -49,8 +49,7 @@
"compute_extension:networks": [["rule:admin_api"]],
"compute_extension:networks:view": [],
"compute_extension:quotas:show": [],
- "compute_extension:quotas:update_for_project": [["rule:admin_api"]],
- "compute_extension:quotas:update_for_user": [["rule:admin_or_projectadmin"]],
+ "compute_extension:quotas:update": [["rule:admin_api"]],
"compute_extension:quota_classes": [],
"compute_extension:rescue": [],
"compute_extension:security_groups": [],
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 80f3a5012..834b5b7e2 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -1163,7 +1163,7 @@ class CloudController(object):
return {'return': "true"}
def run_instances(self, context, **kwargs):
- max_count = int(kwargs.get('max_count', 1))
+ min_count = int(kwargs.get('min_count', 1))
if kwargs.get('kernel_id'):
kernel = self._get_image(context, kwargs['kernel_id'])
kwargs['kernel_id'] = ec2utils.id_to_glance_id(context,
@@ -1190,8 +1190,8 @@ class CloudController(object):
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
image_href=image_uuid,
- min_count=int(kwargs.get('min_count', max_count)),
- max_count=max_count,
+ max_count=int(kwargs.get('max_count', min_count)),
+ min_count=min_count,
kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
key_name=kwargs.get('key_name'),
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index e1ea9c6d8..4a7e574ad 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -138,7 +138,7 @@ def id_to_ec2_inst_id(instance_id):
def ec2_inst_id_to_uuid(context, ec2_id):
- """"Convert an instance id to uuid."""
+ """"Convert an instance id to uuid."""
int_id = ec2_id_to_id(ec2_id)
return get_instance_uuid_from_int_id(context, int_id)
@@ -168,7 +168,7 @@ def id_to_ec2_vol_id(volume_id):
def ec2_vol_id_to_uuid(ec2_id):
- """Get the cooresponding UUID for the given ec2-id."""
+ """Get the corresponding UUID for the given ec2-id."""
ctxt = context.get_admin_context()
# NOTE(jgriffith) first strip prefix to get just the numeric
@@ -199,7 +199,7 @@ def get_volume_uuid_from_int_id(context, int_id):
def ec2_snap_id_to_uuid(ec2_id):
- """Get the cooresponding UUID for the given ec2-id."""
+ """Get the corresponding UUID for the given ec2-id."""
ctxt = context.get_admin_context()
# NOTE(jgriffith) first strip prefix to get just the numeric
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index aa18eceb0..d9710dc37 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -45,11 +45,6 @@ flags.DECLARE('dhcp_domain', 'nova.network.manager')
FLAGS.register_opts(metadata_opts)
-_DEFAULT_MAPPINGS = {'ami': 'sda1',
- 'ephemeral0': 'sda2',
- 'root': block_device.DEFAULT_ROOT_DEV_NAME,
- 'swap': 'sda3'}
-
VERSIONS = [
'1.0',
'2007-01-19',
@@ -387,50 +382,8 @@ def get_metadata_by_address(address):
def _format_instance_mapping(ctxt, instance):
- root_device_name = instance['root_device_name']
- if root_device_name is None:
- return _DEFAULT_MAPPINGS
-
- mappings = {}
- mappings['ami'] = block_device.strip_dev(root_device_name)
- mappings['root'] = root_device_name
- default_ephemeral_device = instance.get('default_ephemeral_device')
- if default_ephemeral_device:
- mappings['ephemeral0'] = default_ephemeral_device
- default_swap_device = instance.get('default_swap_device')
- if default_swap_device:
- mappings['swap'] = default_swap_device
- ebs_devices = []
-
- # 'ephemeralN', 'swap' and ebs
- for bdm in db.block_device_mapping_get_all_by_instance(
- ctxt, instance['uuid']):
- if bdm['no_device']:
- continue
-
- # ebs volume case
- if (bdm['volume_id'] or bdm['snapshot_id']):
- ebs_devices.append(bdm['device_name'])
- continue
-
- virtual_name = bdm['virtual_name']
- if not virtual_name:
- continue
-
- if block_device.is_swap_or_ephemeral(virtual_name):
- mappings[virtual_name] = bdm['device_name']
-
- # NOTE(yamahata): I'm not sure how ebs device should be numbered.
- # Right now sort by device name for deterministic
- # result.
- if ebs_devices:
- nebs = 0
- ebs_devices.sort()
- for ebs in ebs_devices:
- mappings['ebs%d' % nebs] = ebs
- nebs += 1
-
- return mappings
+ bdms = db.block_device_mapping_get_all_by_instance(ctxt, instance['uuid'])
+ return block_device.instance_block_mapping(instance, bdms)
def ec2_md_print(data):
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 961457c46..293be7415 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -139,7 +139,8 @@ class ServerDiskConfigController(wsgi.Controller):
def create(self, req, body):
context = req.environ['nova.context']
if authorize(context):
- self._set_disk_config(body['server'])
+ if 'server' in body:
+ self._set_disk_config(body['server'])
resp_obj = (yield)
self._show(req, resp_obj)
diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py
index db503ffdd..ab264f9da 100644
--- a/nova/api/openstack/compute/contrib/keypairs.py
+++ b/nova/api/openstack/compute/contrib/keypairs.py
@@ -108,6 +108,15 @@ class KeypairController(object):
raise webob.exc.HTTPNotFound()
return webob.Response(status_int=202)
+ @wsgi.serializers(xml=KeypairTemplate)
+ def show(self, req, id):
+ """Return data for the given key name."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ keypair = self.api.get_key_pair(context, context.user_id, id)
+ return {'keypair': keypair}
+
@wsgi.serializers(xml=KeypairsTemplate)
def index(self, req):
"""
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index 56583ff79..33584badc 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import urlparse
import webob
from nova.api.openstack import extensions
@@ -25,15 +24,13 @@ from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import quota
-from nova import utils
QUOTAS = quota.QUOTAS
-def authorize_action(context, action_name):
- action = 'quotas:%s' % action_name
- extensions.extension_authorizer('compute', action)(context)
+authorize_update = extensions.extension_authorizer('compute', 'quotas:update')
+authorize_show = extensions.extension_authorizer('compute', 'quotas:show')
class QuotaTemplate(xmlutil.TemplateBuilder):
@@ -60,102 +57,51 @@ class QuotaSetsController(object):
return dict(quota_set=result)
- def _validate_quota_limit(self, limit, remain, quota):
+ def _validate_quota_limit(self, limit):
# NOTE: -1 is a flag value for unlimited
if limit < -1:
msg = _("Quota limit must be -1 or greater.")
raise webob.exc.HTTPBadRequest(explanation=msg)
- # Quota limit must be less than the remains of the project.
- if remain != -1 and remain < limit - quota:
- msg = _("Quota limit exceed the remains of the project.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- def _get_quotas(self, context, id, user_id=None, remaining=False,
- usages=False):
- # Get the remaining quotas for a project.
- if remaining:
- values = QUOTAS.get_remaining_quotas(context, id)
- return values
-
- if user_id:
- # If user_id, return quotas for the given user.
- values = QUOTAS.get_user_quotas(context, user_id, id,
- usages=usages)
- else:
- values = QUOTAS.get_project_quotas(context, id, usages=usages)
+ def _get_quotas(self, context, id, usages=False):
+ values = QUOTAS.get_project_quotas(context, id, usages=usages)
if usages:
return values
else:
return dict((k, v['limit']) for k, v in values.items())
- def _request_params(self, req):
- qs = req.environ.get('QUERY_STRING', '')
- return urlparse.parse_qs(qs)
-
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
- authorize_action(context, 'show')
- params = self._request_params(req)
- remaining = False
- if 'remaining' in params:
- remaining = utils.bool_from_str(params["remaining"][0])
- user_id = None
- if 'user_id' in params:
- user_id = params["user_id"][0]
+ authorize_show(context)
try:
sqlalchemy_api.authorize_project_context(context, id)
- return self._format_quota_set(id,
- self._get_quotas(context, id, user_id, remaining))
+ return self._format_quota_set(id, self._get_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
- params = self._request_params(req)
+ authorize_update(context)
project_id = id
- user_id = None
- remains = {}
- quotas = {}
- if 'user_id' in params:
- # Project admins are able to modify per-user quotas.
- authorize_action(context, 'update_for_user')
- user_id = params["user_id"][0]
- remains = self._get_quotas(context, project_id, remaining=True)
- quotas = db.quota_get_all_by_user(context, user_id, project_id)
- else:
- # Only admins are able to modify per-project quotas.
- authorize_action(context, 'update_for_project')
-
for key in body['quota_set'].keys():
if key in QUOTAS:
value = int(body['quota_set'][key])
+ self._validate_quota_limit(value)
try:
- if user_id:
- self._validate_quota_limit(value, remains.get(key, 0),
- quotas.get(key, 0))
- db.quota_update_for_user(context, user_id,
- project_id, key, value)
- else:
- self._validate_quota_limit(value, remains.get(key, -1),
- quotas.get(key, 0))
- db.quota_update(context, project_id, key, value)
+ db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
- except exception.UserQuotaNotFound:
- db.quota_create_for_user(context, user_id,
- project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
- return {'quota_set': self._get_quotas(context, id, user_id)}
+ return {'quota_set': self._get_quotas(context, id)}
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
context = req.environ['nova.context']
- authorize_action(context, 'show')
+ authorize_show(context)
return self._format_quota_set(id, QUOTAS.get_defaults(context))
diff --git a/nova/api/openstack/compute/contrib/scheduler_hints.py b/nova/api/openstack/compute/contrib/scheduler_hints.py
index 4bff779a5..e8d65a741 100644
--- a/nova/api/openstack/compute/contrib/scheduler_hints.py
+++ b/nova/api/openstack/compute/contrib/scheduler_hints.py
@@ -49,10 +49,7 @@ class SchedulerHintsController(wsgi.Controller):
if 'server' in body:
body['server']['scheduler_hints'] = hints
- yield
- else:
- msg = _("Missing server attribute")
- raise webob.exc.HTTPBadRequest(reason=msg)
+ yield
class Scheduler_hints(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index e566a95f7..99d713cef 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -339,7 +339,7 @@ class VolumeAttachmentController(object):
raise exc.HTTPUnprocessableEntity()
volume_id = body['volumeAttachment']['volumeId']
- device = body['volumeAttachment']['device']
+ device = body['volumeAttachment'].get('device')
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
@@ -347,15 +347,17 @@ class VolumeAttachmentController(object):
try:
instance = self.compute_api.get(context, server_id)
- self.compute_api.attach_volume(context, instance,
- volume_id, device)
+ device = self.compute_api.attach_volume(context, instance,
+ volume_id, device)
except exception.NotFound:
raise exc.HTTPNotFound()
# The attach is async
attachment = {}
attachment['id'] = volume_id
+ attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
+ attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index 990c08a10..c0ef65670 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -23,7 +23,6 @@ import httplib
import math
import re
import time
-import urlparse
import webob.dec
import webob.exc
@@ -86,18 +85,8 @@ class LimitsController(object):
Return all global and rate limit information.
"""
context = req.environ['nova.context']
- qs = req.environ.get('QUERY_STRING', '')
- params = urlparse.parse_qs(qs)
- if 'user_id' in params:
- user_id = params["user_id"][0]
- quotas = QUOTAS.get_user_quotas(context, user_id,
- context.project_id,
- usages=False)
- else:
- quotas = QUOTAS.get_project_quotas(context,
- context.project_id,
- usages=False)
-
+ quotas = QUOTAS.get_project_quotas(context, context.project_id,
+ usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])
diff --git a/nova/block_device.py b/nova/block_device.py
index aec981933..fbb935d7c 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -19,6 +19,10 @@ import re
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
+_DEFAULT_MAPPINGS = {'ami': 'sda1',
+ 'ephemeral0': 'sda2',
+ 'root': DEFAULT_ROOT_DEV_NAME,
+ 'swap': 'sda3'}
def properties_root_device_name(properties):
@@ -81,3 +85,49 @@ def strip_prefix(device_name):
""" remove both leading /dev/ and xvd or sd or vd """
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
+
+
+def instance_block_mapping(instance, bdms):
+ root_device_name = instance['root_device_name']
+ if root_device_name is None:
+ return _DEFAULT_MAPPINGS
+
+ mappings = {}
+ mappings['ami'] = strip_dev(root_device_name)
+ mappings['root'] = root_device_name
+ default_ephemeral_device = instance.get('default_ephemeral_device')
+ if default_ephemeral_device:
+ mappings['ephemeral0'] = default_ephemeral_device
+ default_swap_device = instance.get('default_swap_device')
+ if default_swap_device:
+ mappings['swap'] = default_swap_device
+ ebs_devices = []
+
+ # 'ephemeralN', 'swap' and ebs
+ for bdm in bdms:
+ if bdm['no_device']:
+ continue
+
+ # ebs volume case
+ if (bdm['volume_id'] or bdm['snapshot_id']):
+ ebs_devices.append(bdm['device_name'])
+ continue
+
+ virtual_name = bdm['virtual_name']
+ if not virtual_name:
+ continue
+
+ if is_swap_or_ephemeral(virtual_name):
+ mappings[virtual_name] = bdm['device_name']
+
+ # NOTE(yamahata): I'm not sure how ebs device should be numbered.
+ # Right now sort by device name for deterministic
+ # result.
+ if ebs_devices:
+ nebs = 0
+ ebs_devices.sort()
+ for ebs in ebs_devices:
+ mappings['ebs%d' % nebs] = ebs
+ nebs += 1
+
+ return mappings
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 2d62c00ed..39b43594a 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -102,7 +102,7 @@ def check_instance_lock(function):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
# NOTE(danms): at this point, we have verified that either
- # theinstance is not locked, or the user is suffiently endowed
+ # the instance is not locked, or the user is sufficiently endowed
# that it doesn't matter. While the following statement may be
# interpreted as the "the instance is not locked", it actually
# refers to the whole condition.
@@ -373,8 +373,7 @@ class API(base.Base):
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
- reservation_id=None, create_instance_here=False,
- scheduler_hints=None):
+ reservation_id=None, scheduler_hints=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation."""
@@ -495,45 +494,48 @@ class API(base.Base):
LOG.debug(_("Going to run %s instances...") % num_instances)
- if create_instance_here:
- instance = self.create_db_entry_for_new_instance(
- context, instance_type, image, base_options,
- security_group, block_device_mapping,
- quota_reservations)
-
- # Reservations committed; don't double-commit
- quota_reservations = None
-
- # Tells scheduler we created the instance already.
- base_options['uuid'] = instance['uuid']
- use_call = False
- else:
- # We need to wait for the scheduler to create the instance
- # DB entries, because the instance *could* be # created in
- # a child zone.
- use_call = True
-
filter_properties = dict(scheduler_hints=scheduler_hints)
if context.is_admin and forced_host:
filter_properties['force_hosts'] = [forced_host]
- # TODO(comstud): We should use rpc.multicall when we can
- # retrieve the full instance dictionary from the scheduler.
- # Otherwise, we could exceed the AMQP max message size limit.
- # This would require the schedulers' schedule_run_instances
- # methods to return an iterator vs a list.
- instances = self._schedule_run_instance(
- use_call,
- context, base_options,
- instance_type,
- availability_zone, injected_files,
- admin_password, image,
- num_instances, requested_networks,
- block_device_mapping, security_group,
- filter_properties, quota_reservations)
-
- if create_instance_here:
- return ([instance], reservation_id)
+ instances = []
+ instance_uuids = []
+ try:
+ for i in xrange(num_instances):
+ options = base_options.copy()
+ instance = self.create_db_entry_for_new_instance(
+ context, instance_type, image, options,
+ security_group, block_device_mapping)
+ instances.append(instance)
+ instance_uuids.append(instance['uuid'])
+ except Exception:
+ # Clean up as best we can.
+ with excutils.save_and_reraise_exception():
+ try:
+ for instance_uuid in instance_uuids:
+ self.db.instance_destroy(context,
+ instance_uuid)
+ finally:
+ QUOTAS.rollback(context, quota_reservations)
+
+ # Commit the reservations
+ QUOTAS.commit(context, quota_reservations)
+
+ request_spec = {
+ 'image': jsonutils.to_primitive(image),
+ 'instance_properties': base_options,
+ 'instance_type': instance_type,
+ 'instance_uuids': instance_uuids,
+ 'block_device_mapping': block_device_mapping,
+ 'security_group': security_group,
+ }
+
+ self.scheduler_rpcapi.run_instance(context,
+ request_spec=request_spec,
+ admin_password=admin_password, injected_files=injected_files,
+ requested_networks=requested_networks, is_first_time=True,
+ filter_properties=filter_properties)
+
return (instances, reservation_id)
@staticmethod
@@ -698,7 +700,7 @@ class API(base.Base):
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
- base_options, security_group, block_device_mapping, reservations):
+ base_options, security_group, block_device_mapping):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
@@ -724,48 +726,8 @@ class API(base.Base):
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
- # Commit the reservations
- if reservations:
- QUOTAS.commit(context, reservations)
-
return instance
- def _schedule_run_instance(self,
- use_call,
- context, base_options,
- instance_type,
- availability_zone, injected_files,
- admin_password, image,
- num_instances,
- requested_networks,
- block_device_mapping,
- security_group,
- filter_properties,
- quota_reservations):
- """Send a run_instance request to the schedulers for processing."""
-
- pid = context.project_id
- uid = context.user_id
-
- LOG.debug(_("Sending create to scheduler for %(pid)s/%(uid)s's") %
- locals())
-
- request_spec = {
- 'image': jsonutils.to_primitive(image),
- 'instance_properties': base_options,
- 'instance_type': instance_type,
- 'num_instances': num_instances,
- 'block_device_mapping': block_device_mapping,
- 'security_group': security_group,
- }
-
- return self.scheduler_rpcapi.run_instance(context,
- request_spec=request_spec,
- admin_password=admin_password, injected_files=injected_files,
- requested_networks=requested_networks, is_first_time=True,
- filter_properties=filter_properties,
- reservations=quota_reservations, call=use_call)
-
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
@@ -795,21 +757,13 @@ class API(base.Base):
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
- Returns a tuple of (instances, reservation_id) where instances
- could be 'None' or a list of instance dicts depending on if
- we waited for information from the scheduler or not.
+ Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
- # We can create the DB entry for the instance here if we're
- # only going to create 1 instance.
- # This speeds up API responses for builds
- # as we don't need to wait for the scheduler.
- create_instance_here = max_count == 1 or max_count is None
-
- (instances, reservation_id) = self._create_instance(
+ return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -820,24 +774,8 @@ class API(base.Base):
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
- create_instance_here=create_instance_here,
scheduler_hints=scheduler_hints)
- if create_instance_here or instances is None:
- return (instances, reservation_id)
-
- inst_ret_list = []
- for instance in instances:
- if instance.get('_is_precooked', False):
- inst_ret_list.append(instance)
- else:
- # Scheduler only gives us the 'id'. We need to pull
- # in the created instances from the DB
- instance = self.db.instance_get(context, instance['id'])
- inst_ret_list.append(dict(instance.iteritems()))
-
- return (inst_ret_list, reservation_id)
-
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall"""
@@ -1561,7 +1499,7 @@ class API(base.Base):
request_spec = {
'instance_type': new_instance_type,
- 'num_instances': 1,
+ 'instance_uuids': instance['uuid'],
'instance_properties': instance}
filter_properties = {'ignore_hosts': []}
@@ -1689,6 +1627,9 @@ class API(base.Base):
@wrap_check_policy
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
+ if not instance['host']:
+ raise exception.InstanceNotReady(instance=instance)
+
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
@@ -1739,15 +1680,33 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
- def attach_volume(self, context, instance, volume_id, device):
+ def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
- if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
+ # NOTE(vish): Fail fast if the device is not going to pass. This
+ # will need to be removed along with the test if we
+ # change the logic in the manager for what constitutes
+ # a valid device.
+ if device and not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
raise exception.InvalidDevicePath(path=device)
- volume = self.volume_api.get(context, volume_id)
- self.volume_api.check_attach(context, volume)
- self.volume_api.reserve_volume(context, volume)
- self.compute_rpcapi.attach_volume(context, instance=instance,
- volume_id=volume_id, mountpoint=device)
+ # NOTE(vish): This is done on the compute host because we want
+ # to avoid a race where two devices are requested at
+ # the same time. When db access is removed from
+ # compute, the bdm will be created here and we will
+ # have to make sure that they are assigned atomically.
+ device = self.compute_rpcapi.reserve_block_device_name(
+ context, device=device, instance=instance)
+ try:
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_attach(context, volume)
+ self.volume_api.reserve_volume(context, volume)
+ self.compute_rpcapi.attach_volume(context, instance=instance,
+ volume_id=volume_id, mountpoint=device)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.db.block_device_mapping_destroy_by_instance_and_device(
+ context, instance['uuid'], device)
+
+ return device
@check_instance_lock
def _detach_volume(self, context, instance, volume_id):
@@ -2062,6 +2021,13 @@ class KeypairAPI(base.Base):
})
return rval
+ def get_key_pair(self, context, user_id, key_name):
+ """Get a keypair by name."""
+ key_pair = self.db.key_pair_get(context, user_id, key_name)
+ return {'name': key_pair['name'],
+ 'public_key': key_pair['public_key'],
+ 'fingerprint': key_pair['fingerprint']}
+
class SecurityGroupAPI(base.Base):
"""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 10056137c..6d499f9fb 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -247,7 +247,7 @@ def _get_image_meta(context, image_ref):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '1.43'
+ RPC_API_VERSION = '1.44'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -329,10 +329,17 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.info(
_('Rebooting instance after nova-compute restart.'),
locals(), instance=instance)
+
+ block_device_info = \
+ self._get_instance_volume_block_device_info(
+ context, instance['uuid'])
+
try:
- self.driver.resume_state_on_host_boot(context,
- instance,
- self._legacy_nw_info(net_info))
+ self.driver.resume_state_on_host_boot(
+ context,
+ instance,
+ self._legacy_nw_info(net_info),
+ block_device_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
@@ -594,14 +601,13 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_uuid=instance_uuid)
return
- request_spec['num_instances'] = 1
+ request_spec['instance_uuids'] = [instance_uuid]
LOG.debug(_("Re-scheduling instance: attempt %d"),
retry['num_attempts'], instance_uuid=instance_uuid)
self.scheduler_rpcapi.run_instance(context,
request_spec, admin_password, injected_files,
- requested_networks, is_first_time, filter_properties,
- reservations=None, call=False)
+ requested_networks, is_first_time, filter_properties)
return True
@manager.periodic_task
@@ -1119,9 +1125,13 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
+
+ block_device_info = self._get_instance_volume_block_device_info(
+ context, instance['uuid'])
+
try:
self.driver.reboot(instance, self._legacy_nw_info(network_info),
- reboot_type)
+ reboot_type, block_device_info)
except Exception, exc:
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
@@ -2025,11 +2035,39 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_fault
+ def reserve_block_device_name(self, context, instance, device):
+
+ @utils.synchronized(instance['uuid'])
+ def do_reserve():
+ result = compute_utils.get_device_name_for_instance(context,
+ instance,
+ device)
+ # NOTE(vish): create bdm here to avoid race condition
+ values = {'instance_uuid': instance['uuid'],
+ 'device_name': result}
+ self.db.block_device_mapping_create(context, values)
+ return result
+ return do_reserve()
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint, instance_uuid=None,
instance=None):
"""Attach a volume to an instance."""
+ try:
+ return self._attach_volume(context, volume_id, mountpoint,
+ instance_uuid, instance)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ instance_uuid = instance_uuid or instance.get('uuid')
+ self.db.block_device_mapping_destroy_by_instance_and_device(
+ context, instance_uuid, mountpoint)
+
+ def _attach_volume(self, context, volume_id, mountpoint, instance_uuid,
+ instance):
volume = self.volume_api.get(context, volume_id)
context = context.elevated()
if not instance:
@@ -2076,7 +2114,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
- self.db.block_device_mapping_create(context, values)
+ self.db.block_device_mapping_update_or_create(context, values)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
@@ -2134,7 +2172,7 @@ class ComputeManager(manager.SchedulerDependentManager):
pass
def get_instance_disk_info(self, context, instance_name):
- """Getting infomation of instance's current disk.
+ """Getting information of instance's current disk.
DEPRECATED: This method is no longer used by any current code, but it
is left here to provide backwards compatibility in the rpcapi.
@@ -2408,7 +2446,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
if not instance:
instance = self.db.instance_get(context, instance_id)
- LOG.info(_('Post operation of migraton started'),
+ LOG.info(_('Post operation of migration started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
@@ -2802,7 +2840,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# to allow all the hooks and checks to be performed.
self.compute_api.stop(context, db_instance)
except Exception:
- # Note(maoy): there is no need to propergate the error
+ # Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index c81d75356..584335754 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -124,6 +124,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
1.43 - Add migrate_data to live_migration()
+ 1.44 - Adds reserve_block_device_name()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -479,6 +480,13 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
version='1.1')
+ def reserve_block_device_name(self, ctxt, instance, device):
+ instance_p = jsonutils.to_primitive(instance)
+ return self.call(ctxt, self.make_msg('reserve_block_device_name',
+ instance=instance_p, device=device),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.44')
+
def snapshot_instance(self, ctxt, instance, image_id, image_type,
backup_type, rotation):
instance_p = jsonutils.to_primitive(instance)
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 6d2fb2202..ef967c934 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -16,6 +16,10 @@
"""Compute-related Utilities and helpers."""
+import re
+import string
+
+from nova import block_device
from nova import db
from nova import exception
from nova import flags
@@ -29,6 +33,54 @@ FLAGS = flags.FLAGS
LOG = log.getLogger(__name__)
+def get_device_name_for_instance(context, instance, device):
+ # NOTE(vish): this will generate a unique device name that is not
+ # in use already. It is a reasonable guess at where
+ # it will show up in a linux guest, but it may not
+ # always be correct
+ req_prefix = None
+ req_letters = None
+ if device:
+ try:
+ match = re.match("(^/dev/x{0,1}[a-z]d)([a-z]+)$", device)
+ req_prefix, req_letters = match.groups()
+ except (TypeError, AttributeError, ValueError):
+ raise exception.InvalidDevicePath(path=device)
+ bdms = db.block_device_mapping_get_all_by_instance(context,
+ instance['uuid'])
+ mappings = block_device.instance_block_mapping(instance, bdms)
+ try:
+ match = re.match("(^/dev/x{0,1}[a-z]d)[a-z]+[0-9]*$", mappings['root'])
+ prefix = match.groups()[0]
+ except (TypeError, AttributeError, ValueError):
+ raise exception.InvalidDevicePath(path=mappings['root'])
+ if not req_prefix:
+ req_prefix = prefix
+ letters_list = []
+    for _name, mapped_device in mappings.iteritems():
+        letter = block_device.strip_prefix(mapped_device)
+        # NOTE(vish): delete numbers in case we have something like
+        #             /dev/sda1
+        letter = re.sub("\d+", "", letter)
+        letters_list.append(letter)
+ used_letters = set(letters_list)
+ if not req_letters:
+ req_letters = _get_unused_letters(used_letters)
+ if req_letters in used_letters:
+ raise exception.DevicePathInUse(path=device)
+ return req_prefix + req_letters
+
+
+def _get_unused_letters(used_letters):
+ doubles = [first + second for second in string.ascii_lowercase
+ for first in string.ascii_lowercase]
+ all_letters = set(list(string.ascii_lowercase) + doubles)
+ letters = list(all_letters - used_letters)
+ # NOTE(vish): prepend ` so all shorter sequences sort first
+ letters.sort(key=lambda x: x.rjust(2, '`'))
+ return letters[0]
+
+
def notify_usage_exists(context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
diff --git a/nova/console/api.py b/nova/console/api.py
index 32004094a..8becf35cf 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -43,7 +43,7 @@ class API(base.Base):
def delete_console(self, context, instance_uuid, console_uuid):
console = self.db.console_get(context, console_uuid, instance_uuid)
topic = rpc.queue_get_for(context, FLAGS.console_topic,
- pool['host'])
+ console['pool']['host'])
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
rpcapi.remove_console(context, console['id'])
@@ -62,12 +62,6 @@ class API(base.Base):
rpcapi = compute_rpcapi.ComputeAPI()
return rpcapi.get_console_topic(context, instance_host)
- def _translate_id_if_necessary(self, context, instance_uuid):
- if not utils.is_uuid_like(instance_uuid):
- instance = self.db.instance_get(context, instance_uuid)
- instance_uuid = instance['uuid']
- return instance_uuid
-
def _get_instance(self, context, instance_uuid):
if utils.is_uuid_like(instance_uuid):
instance = self.db.instance_get_by_uuid(context, instance_uuid)
diff --git a/nova/context.py b/nova/context.py
index 66697b567..b6fd105eb 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -24,6 +24,7 @@ import copy
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova import policy
from nova import utils
@@ -66,9 +67,7 @@ class RequestContext(object):
self.roles = roles or []
self.is_admin = is_admin
if self.is_admin is None:
- self.is_admin = 'admin' in [x.lower() for x in self.roles]
- elif self.is_admin and 'admin' not in self.roles:
- self.roles.append('admin')
+ self.is_admin = policy.check_admin_role(self.roles)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
diff --git a/nova/crypto.py b/nova/crypto.py
index bdb056c93..d1c9919e2 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
from __future__ import absolute_import
-import base64
import hashlib
import os
import string
diff --git a/nova/db/api.py b/nova/db/api.py
index ceb361722..94fdd8ce2 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -568,12 +568,6 @@ def instance_create(context, values):
return IMPL.instance_create(context, values)
-def instance_data_get_for_user(context, user_id, project_id, session=None):
- """Get (instance_count, total_cores, total_ram) for user."""
- return IMPL.instance_data_get_for_user(context, user_id, project_id,
- session=session)
-
-
def instance_data_get_for_project(context, project_id, session=None):
"""Get (instance_count, total_cores, total_ram) for project."""
return IMPL.instance_data_get_for_project(context, project_id,
@@ -955,42 +949,6 @@ def quota_destroy(context, project_id, resource):
###################
-def quota_create_for_user(context, user_id, project_id, resource, limit):
- """Create a quota for the given user and project."""
- return IMPL.quota_create_for_user(context, user_id,
- project_id, resource, limit)
-
-
-def quota_get_for_user(context, user_id, project_id, resource):
- """Retrieve a quota or raise if it does not exist."""
- return IMPL.quota_get_for_user(context, user_id,
- project_id, resource)
-
-
-def quota_get_all_by_user(context, user_id, project_id):
- """Retrieve all quotas associated with a given user and project."""
- return IMPL.quota_get_all_by_user(context, user_id, project_id)
-
-
-def quota_get_remaining(context, project_id):
- """Retrieve the remaining quotas associated with a given project."""
- return IMPL.quota_get_remaining(context, project_id)
-
-
-def quota_update_for_user(context, user_id, project_id, resource, limit):
- """Update a quota or raise if it does not exist."""
- return IMPL.quota_update_for_user(context, user_id,
- project_id, resource, limit)
-
-
-def quota_destroy_for_user(context, user_id, project_id, resource):
- """Destroy the quota or raise if it does not exist."""
- return IMPL.quota_destroy_for_user(context, user_id, project_id, resource)
-
-
-###################
-
-
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
@@ -1024,21 +982,16 @@ def quota_class_destroy_all_by_name(context, class_name):
###################
-def quota_usage_create(context, user_id, project_id, resource, in_use,
- reserved, until_refresh):
- """Create a quota usage for the given user and resource."""
- return IMPL.quota_usage_create(context, user_id, project_id, resource,
+def quota_usage_create(context, project_id, resource, in_use, reserved,
+ until_refresh):
+ """Create a quota usage for the given project and resource."""
+ return IMPL.quota_usage_create(context, project_id, resource,
in_use, reserved, until_refresh)
-def quota_usage_get(context, user_id, project_id, resource):
+def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
- return IMPL.quota_usage_get(context, user_id, project_id, resource)
-
-
-def quota_usage_get_all_by_user(context, user_id, project_id):
- """Retrieve all usage associated with a given user."""
- return IMPL.quota_usage_get_all_by_user(context, user_id, project_id)
+ return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
@@ -1046,25 +999,25 @@ def quota_usage_get_all_by_project(context, project_id):
return IMPL.quota_usage_get_all_by_project(context, project_id)
-def quota_usage_update(context, user_id, project_id, resource, in_use,
- reserved, until_refresh):
+def quota_usage_update(context, project_id, resource, in_use, reserved,
+ until_refresh):
"""Update a quota usage or raise if it does not exist."""
- return IMPL.quota_usage_update(context, user_id, project_id, resource,
+ return IMPL.quota_usage_update(context, project_id, resource,
in_use, reserved, until_refresh)
-def quota_usage_destroy(context, user_id, project_id, resource):
+def quota_usage_destroy(context, project_id, resource):
"""Destroy the quota usage or raise if it does not exist."""
- return IMPL.quota_usage_destroy(context, user_id, project_id, resource)
+ return IMPL.quota_usage_destroy(context, project_id, resource)
###################
-def reservation_create(context, uuid, usage, user_id, project_id, resource,
- delta, expire):
- """Create a reservation for the given user and resource."""
- return IMPL.reservation_create(context, uuid, usage, user_id, project_id,
+def reservation_create(context, uuid, usage, project_id, resource, delta,
+ expire):
+ """Create a reservation for the given project and resource."""
+ return IMPL.reservation_create(context, uuid, usage, project_id,
resource, delta, expire)
@@ -1073,9 +1026,9 @@ def reservation_get(context, uuid):
return IMPL.reservation_get(context, uuid)
-def reservation_get_all_by_user(context, user_id, project_id):
- """Retrieve all reservations associated with a given user."""
- return IMPL.reservation_get_all_by_user(context, user_id, project_id)
+def reservation_get_all_by_project(context, project_id):
+ """Retrieve all reservations associated with a given project."""
+ return IMPL.reservation_get_all_by_project(context, project_id)
def reservation_destroy(context, uuid):
@@ -1103,11 +1056,6 @@ def reservation_rollback(context, reservations):
return IMPL.reservation_rollback(context, reservations)
-def quota_destroy_all_by_user(context, user_id, project_id):
- """Destroy all quotas associated with a given user."""
- return IMPL.quota_destroy_all_by_user(context, user_id, project_id)
-
-
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
@@ -1136,12 +1084,6 @@ def volume_create(context, values):
return IMPL.volume_create(context, values)
-def volume_data_get_for_user(context, user_id, project_id, session=None):
- """Get (volume_count, gigabytes) for user."""
- return IMPL.volume_data_get_for_user(context, user_id, project_id,
- session=session)
-
-
def volume_data_get_for_project(context, project_id, session=None):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id,
@@ -1298,9 +1240,16 @@ def block_device_mapping_destroy(context, bdm_id):
return IMPL.block_device_mapping_destroy(context, bdm_id)
+def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
+ device_name):
+ """Destroy the block device mapping."""
+ return IMPL.block_device_mapping_destroy_by_instance_and_device(
+ context, instance_uuid, device_name)
+
+
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
- """Destroy the block device mapping or raise if it does not exist."""
+ """Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
@@ -1323,11 +1272,6 @@ def security_group_get_by_name(context, project_id, group_name):
return IMPL.security_group_get_by_name(context, project_id, group_name)
-def security_group_get_by_user(context, user_id, project_id):
- """Get all security groups belonging to a user."""
- return IMPL.security_group_get_by_user(context, user_id, project_id)
-
-
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
@@ -1363,12 +1307,6 @@ def security_group_destroy(context, security_group_id):
return IMPL.security_group_destroy(context, security_group_id)
-def security_group_count_by_user(context, user_id, project_id, session=None):
- """Count number of security groups for a user in specific project."""
- return IMPL.security_group_count_by_user(context, user_id, project_id,
- session=session)
-
-
def security_group_count_by_project(context, project_id, session=None):
"""Count number of security groups in a project."""
return IMPL.security_group_count_by_project(context, project_id,
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 85f5d8f48..d15b7b353 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1442,33 +1442,20 @@ def instance_create(context, values):
return instance_ref
-def _get_instance_data(context, project_id, user_id=None, session=None):
+@require_admin_context
+def instance_data_get_for_project(context, project_id, session=None):
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
read_deleted="no",
session=session).\
- filter_by(project_id=project_id)
- if user_id:
- result = result.filter_by(user_id=user_id).first()
- else:
- result = result.first()
+ filter_by(project_id=project_id).\
+ first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
-@require_admin_context
-def instance_data_get_for_project(context, project_id, session=None):
- return _get_instance_data(context, project_id, session=session)
-
-
-@require_admin_context
-def instance_data_get_for_user(context, user_id, project_id, session=None):
- return _get_instance_data(context, project_id, user_id=user_id,
- session=session)
-
-
@require_context
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
@@ -2390,11 +2377,10 @@ def quota_get(context, project_id, resource, session=None):
@require_context
-def quota_get_all_by_project(context, project_id, session=None):
+def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
- rows = model_query(context, models.Quota, session=session,
- read_deleted="no").\
+ rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@@ -2436,97 +2422,6 @@ def quota_destroy(context, project_id, resource):
@require_context
-def quota_get_for_user(context, user_id, project_id, resource, session=None):
- authorize_project_context(context, project_id)
- result = model_query(context, models.UserQuota, session=session,
- read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- filter_by(resource=resource).\
- first()
-
- if not result:
- raise exception.UserQuotaNotFound(project_id=project_id,
- user_id=user_id)
-
- return result
-
-
-@require_context
-def quota_get_all_by_user(context, user_id, project_id):
- authorize_project_context(context, project_id)
-
- rows = model_query(context, models.UserQuota, read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
- result = {'user_id': user_id, 'project_id': project_id}
- for row in rows:
- result[row.resource] = row.hard_limit
-
- return result
-
-
-@require_context
-def quota_get_remaining(context, project_id):
- authorize_project_context(context, project_id)
-
- session = get_session()
- with session.begin():
- rows = model_query(context, models.UserQuota, session=session,
- read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
-
- result = quota_get_all_by_project(context, project_id, session=session)
-
- for row in rows:
- if row.resource in result:
- result[row.resource] -= row.hard_limit
-
- result['project_id'] = project_id
-
- return result
-
-
-@require_context
-def quota_create_for_user(context, user_id, project_id, resource, limit):
- authorize_project_context(context, project_id)
- quota_ref = models.UserQuota()
- quota_ref.user_id = user_id
- quota_ref.project_id = project_id
- quota_ref.resource = resource
- quota_ref.hard_limit = limit
- quota_ref.save()
- return quota_ref
-
-
-@require_context
-def quota_update_for_user(context, user_id, project_id, resource, limit):
- authorize_project_context(context, project_id)
- session = get_session()
- with session.begin():
- quota_ref = quota_get_for_user(context, user_id, project_id, resource,
- session=session)
- quota_ref.hard_limit = limit
- quota_ref.save(session=session)
-
-
-@require_context
-def quota_destroy_for_user(context, user_id, project_id, resource):
- authorize_project_context(context, project_id)
- session = get_session()
- with session.begin():
- quota_ref = quota_get_for_user(context, user_id, project_id, resource,
- session=session)
- quota_ref.delete(session=session)
-
-
-###################
-
-
-@require_context
def quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
@@ -2601,10 +2496,9 @@ def quota_class_destroy_all_by_name(context, class_name):
@require_context
-def quota_usage_get(context, user_id, project_id, resource, session=None):
+def quota_usage_get(context, project_id, resource, session=None):
result = model_query(context, models.QuotaUsage, session=session,
read_deleted="no").\
- filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
@@ -2616,22 +2510,6 @@ def quota_usage_get(context, user_id, project_id, resource, session=None):
@require_context
-def quota_usage_get_all_by_user(context, user_id, project_id):
- authorize_project_context(context, project_id)
-
- rows = model_query(context, models.QuotaUsage, read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
- result = {'user_id': user_id, 'project_id': project_id}
- for row in rows:
- result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
-
- return result
-
-
-@require_context
def quota_usage_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -2640,20 +2518,16 @@ def quota_usage_get_all_by_project(context, project_id):
all()
result = {'project_id': project_id}
- in_use = 0
- reserved = 0
for row in rows:
- result[row.resource] = dict(in_use=in_use + row.in_use,
- reserved=reserved + row.reserved)
+ result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
return result
@require_admin_context
-def quota_usage_create(context, user_id, project_id, resource, in_use,
- reserved, until_refresh, session=None):
+def quota_usage_create(context, project_id, resource, in_use, reserved,
+ until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
- quota_usage_ref.user_id = user_id
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
@@ -2665,11 +2539,11 @@ def quota_usage_create(context, user_id, project_id, resource, in_use,
@require_admin_context
-def quota_usage_update(context, user_id, project_id, resource, in_use,
- reserved, until_refresh, session=None):
+def quota_usage_update(context, project_id, resource, in_use, reserved,
+ until_refresh, session=None):
def do_update(session):
- quota_usage_ref = quota_usage_get(context, user_id, project_id,
- resource, session=session)
+ quota_usage_ref = quota_usage_get(context, project_id, resource,
+ session=session)
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
@@ -2685,11 +2559,11 @@ def quota_usage_update(context, user_id, project_id, resource, in_use,
@require_admin_context
-def quota_usage_destroy(context, user_id, project_id, resource):
+def quota_usage_destroy(context, project_id, resource):
session = get_session()
with session.begin():
- quota_usage_ref = quota_usage_get(context, user_id, project_id,
- resource, session=session)
+ quota_usage_ref = quota_usage_get(context, project_id, resource,
+ session=session)
quota_usage_ref.delete(session=session)
@@ -2710,15 +2584,14 @@ def reservation_get(context, uuid, session=None):
@require_context
-def reservation_get_all_by_user(context, user_id, project_id):
+def reservation_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
-    rows = model_query(context, models.QuotaUsage, read_deleted="no").\
+    rows = model_query(context, models.Reservation, read_deleted="no").\
- filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
- result = {'user_id': user_id, 'project_id': project_id}
+ result = {'project_id': project_id}
for row in rows:
result.setdefault(row.resource, {})
result[row.resource][row.uuid] = row.delta
@@ -2727,12 +2600,11 @@ def reservation_get_all_by_user(context, user_id, project_id):
@require_admin_context
-def reservation_create(context, uuid, usage, user_id, project_id, resource,
- delta, expire, session=None):
+def reservation_create(context, uuid, usage, project_id, resource, delta,
+ expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
- reservation_ref.user_id = user_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
@@ -2762,7 +2634,6 @@ def _get_quota_usages(context, session):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
- filter_by(user_id=context.user_id).\
filter_by(project_id=context.project_id).\
with_lockmode('update').\
all()
@@ -2787,7 +2658,6 @@ def quota_reserve(context, resources, quotas, deltas, expire,
refresh = False
if resource not in usages:
usages[resource] = quota_usage_create(elevated,
- context.user_id,
context.project_id,
resource,
0, 0,
@@ -2811,13 +2681,11 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Grab the sync routine
sync = resources[resource].sync
- updates = sync(elevated, context.user_id,
- context.project_id, session)
+ updates = sync(elevated, context.project_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = quota_usage_create(elevated,
- context.user_id,
context.project_id,
res,
0, 0,
@@ -2868,7 +2736,6 @@ def quota_reserve(context, resources, quotas, deltas, expire,
reservation = reservation_create(elevated,
str(utils.gen_uuid()),
usages[resource],
- context.user_id,
context.project_id,
resource, delta, expire,
session=session)
@@ -2953,38 +2820,6 @@ def reservation_rollback(context, reservations):
@require_admin_context
-def quota_destroy_all_by_user(context, user_id, project_id):
- session = get_session()
- with session.begin():
- quotas = model_query(context, models.UserQuota, session=session,
- read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
- for quota_ref in quotas:
- quota_ref.delete(session=session)
-
- quota_usages = model_query(context, models.QuotaUsage,
- session=session, read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
- for quota_usage_ref in quota_usages:
- quota_usage_ref.delete(session=session)
-
- reservations = model_query(context, models.Reservation,
- session=session, read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
- for reservation_ref in reservations:
- reservation_ref.delete(session=session)
-
-
-@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
@@ -3089,34 +2924,21 @@ def volume_create(context, values):
return volume_ref
-def _get_volume_data(context, project_id, user_id=None, session=None):
+@require_admin_context
+def volume_data_get_for_project(context, project_id, session=None):
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
- filter_by(project_id=project_id)
-
- if user_id:
- result = result.filter_by(user_id=user_id).first()
- else:
- result = result.first()
+ filter_by(project_id=project_id).\
+ first()
+ # NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
-def volume_data_get_for_user(context, user_id, project_id, session=None):
- return _get_volume_data(context, project_id, user_id=user_id,
- session=session)
-
-
-@require_admin_context
-def volume_data_get_for_project(context, project_id, session=None):
- return _get_volume_data(context, project_id, session=session)
-
-
-@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
@@ -3463,8 +3285,7 @@ def snapshot_update(context, snapshot_id, values):
def _block_device_mapping_get_query(context, session=None):
- return model_query(context, models.BlockDeviceMapping, session=session,
- read_deleted="no")
+ return model_query(context, models.BlockDeviceMapping, session=session)
@require_context
@@ -3547,6 +3368,19 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
'updated_at': literal_column('updated_at')})
+@require_context
+def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
+ device_name):
+ session = get_session()
+ with session.begin():
+ _block_device_mapping_get_query(context, session=session).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(device_name=device_name).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
###################
def _security_group_get_query(context, session=None, read_deleted=None,
@@ -3625,14 +3459,6 @@ def security_group_get_by_name(context, project_id, group_name,
@require_context
-def security_group_get_by_user(context, user_id, project_id):
- return _security_group_get_query(context, read_deleted="no").\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
-
-
-@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -3728,16 +3554,6 @@ def security_group_destroy(context, security_group_id):
@require_context
-def security_group_count_by_user(context, user_id, project_id, session=None):
- authorize_project_context(context, project_id)
- return model_query(context, models.SecurityGroup, read_deleted="no",
- session=session).\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- count()
-
-
-@require_context
def security_group_count_by_project(context, project_id, session=None):
authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py b/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py
index 4e6048714..4e2a1a70d 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py
@@ -66,7 +66,6 @@ def _populate_instance_types(instance_types_table):
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
index 52a340889..73d8b6968 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
@@ -29,7 +29,6 @@ LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
block_device_mapping = Table('block_device_mapping', meta, autoload=True)
instances = Table('instances', meta, autoload=True)
uuid_column = Column('instance_uuid', String(36))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py b/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
index 4a2ba2d47..4be63b322 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
@@ -29,7 +29,6 @@ def upgrade(migrate_engine):
"""Convert volume and snapshot id columns from int to varchar."""
meta = MetaData()
meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
volumes = Table('volumes', meta, autoload=True)
snapshots = Table('snapshots', meta, autoload=True)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
index c1c7d7a44..c4c13e539 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
@@ -16,8 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import select, Column, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
+from sqlalchemy import MetaData, Table
from migrate import ForeignKeyConstraint
from nova.openstack.common import log as logging
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py b/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
index 2874b4042..2c483007c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
@@ -16,8 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import select, Column, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
+from sqlalchemy import MetaData, Table
from migrate import ForeignKeyConstraint
from nova.openstack.common import log as logging
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py b/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
index 39985a1af..250906c62 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
@@ -16,7 +16,7 @@
from nova.openstack.common import log as logging
from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import Index, MetaData, String, Table
+from sqlalchemy import MetaData, String, Table
LOG = logging.getLogger(__name__)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py b/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
index e6aedc1a6..d8593bd77 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
@@ -15,7 +15,7 @@
# under the License.
from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import Index, MetaData, String, Table
+from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py b/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
index 16489b068..a2b0792d3 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
@@ -17,7 +17,6 @@
from migrate import ForeignKeyConstraint
from sqlalchemy import MetaData, Table
-from sqlalchemy import Column, ForeignKey, Integer
from nova.openstack.common import log as logging
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
new file mode 100644
index 000000000..ccf9d66b8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common import log as logging
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ # Reverse the previous migration
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+ d = reservations.delete(reservations.c.deleted == True)
+ d.execute()
+ reservations.drop_column('user_id')
+
+ quota_usages = Table('quota_usages', meta, autoload=True)
+ d = quota_usages.delete(quota_usages.c.user_id != None)
+ d.execute()
+ quota_usages.drop_column('user_id')
+
+ user_quotas = Table('user_quotas', meta, autoload=True)
+ try:
+ user_quotas.drop()
+ except Exception:
+ LOG.error(_("user_quotas table not dropped"))
+ raise
+
+
+def downgrade(migrate_engine):
+ # Undo the reversal of the previous migration
+ # (data is not preserved)
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # Add 'user_id' column to quota_usages table.
+ quota_usages = Table('quota_usages', meta, autoload=True)
+ user_id = Column('user_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False))
+ quota_usages.create_column(user_id)
+
+ # Add 'user_id' column to reservations table.
+ reservations = Table('reservations', meta, autoload=True)
+ user_id = Column('user_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False))
+ reservations.create_column(user_id)
+
+ # New table.
+ user_quotas = Table('user_quotas', meta,
+ Column('id', Integer(), primary_key=True),
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(), default=False),
+ Column('user_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('resource',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=False),
+ Column('hard_limit', Integer(), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ try:
+ user_quotas.create()
+ except Exception:
+ LOG.error(_("Table |%s| not created!"), repr(user_quotas))
+ raise
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index fa3ebe8f0..7d4435a7a 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -428,19 +428,6 @@ class Quota(BASE, NovaBase):
hard_limit = Column(Integer, nullable=True)
-class UserQuota(BASE, NovaBase):
- """Represents a single quota override for a user."""
-
- __tablename__ = 'user_quotas'
- id = Column(Integer, primary_key=True)
-
- user_id = Column(String(255), index=True)
- project_id = Column(String(255), index=True)
-
- resource = Column(String(255))
- hard_limit = Column(Integer, nullable=True)
-
-
class QuotaClass(BASE, NovaBase):
"""Represents a single quota override for a quota class.
@@ -464,7 +451,6 @@ class QuotaUsage(BASE, NovaBase):
__tablename__ = 'quota_usages'
id = Column(Integer, primary_key=True)
- user_id = Column(String(255), index=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
@@ -487,7 +473,6 @@ class Reservation(BASE, NovaBase):
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
- user_id = Column(String(255), index=True)
project_id = Column(String(255), index=True)
resource = Column(String(255))
diff --git a/nova/exception.py b/nova/exception.py
index 86d3a59b2..5d232bdd8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -386,6 +386,10 @@ class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
+class DevicePathInUse(Invalid):
+ message = _("The supplied device path (%(path)s) is in use.")
+
+
class DeviceIsBusy(Invalid):
message = _("The supplied device (%(device)s) is busy.")
@@ -746,11 +750,6 @@ class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
-class UserQuotaNotFound(QuotaNotFound):
- message = _("Quota for user %(user_id)s in project %(project_id)s "
- "could not be found.")
-
-
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index f0ffa8b86..b08eb0a62 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -125,7 +125,8 @@ class IptablesRule(object):
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
- return '-A %s %s' % (chain, self.rule)
+ # new rules should have a zero [packet: byte] count
+ return '[0:0] -A %s %s' % (chain, self.rule)
class IptablesTable(object):
@@ -133,8 +134,10 @@ class IptablesTable(object):
def __init__(self):
self.rules = []
+ self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
+ self.remove_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
@@ -172,7 +175,13 @@ class IptablesTable(object):
name)
return
+ # non-wrapped chains and rules need to be dealt with specially,
+ # so we keep a list of them to be iterated over in apply()
+ if not wrap:
+ self.remove_chains.add(name)
chain_set.remove(name)
+ if not wrap:
+ self.remove_rules += filter(lambda r: r.chain == name, self.rules)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
@@ -180,6 +189,9 @@ class IptablesTable(object):
else:
jump_snippet = '-j %s' % (name,)
+ if not wrap:
+ self.remove_rules += filter(lambda r: jump_snippet in r.rule,
+ self.rules)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
@@ -216,6 +228,8 @@ class IptablesTable(object):
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
+ if not wrap:
+ self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
@@ -342,14 +356,14 @@ class IptablesManager(object):
for cmd, tables in s:
for table in tables:
- current_table, _err = self.execute('%s-save' % (cmd,),
+ current_table, _err = self.execute('%s-save' % (cmd,), '-c',
'-t', '%s' % (table,),
run_as_root=True,
attempts=5)
current_lines = current_table.split('\n')
new_filter = self._modify_rules(current_lines,
tables[table])
- self.execute('%s-restore' % (cmd,), run_as_root=True,
+ self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(new_filter),
attempts=5)
LOG.debug(_("IPTablesManager.apply completed with success"))
@@ -357,7 +371,9 @@ class IptablesManager(object):
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
+ remove_chains = table.remove_chains
rules = table.rules
+ remove_rules = table.remove_rules
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
@@ -374,15 +390,42 @@ class IptablesManager(object):
break
our_rules = []
+ bot_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
- new_filter = filter(lambda s: s.strip() != rule_str.strip(),
+
+ # We don't want to remove an entry if it has non-zero
+ # [packet:byte] counts and replace it with [0:0], so let's
+            # go look for a duplicate, and override our table rule if
+ # found.
+
+ # ignore [packet:byte] counts at beginning of line
+ if rule_str.startswith('['):
+ rule_str = rule_str.split(']', 1)[1]
+ dup_filter = filter(lambda s: rule_str.strip() in s.strip(),
+ new_filter)
+
+ new_filter = filter(lambda s:
+ rule_str.strip() not in s.strip(),
new_filter)
- our_rules += [rule_str]
+ # if no duplicates, use original rule
+ if dup_filter:
+ # grab the last entry, if there is one
+ dup = dup_filter[-1]
+ rule_str = str(dup)
+ else:
+ rule_str = str(rule)
+ rule_str.strip()
+
+ our_rules += [rule_str]
+ else:
+ bot_rules += [rule_str]
+
+ our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
@@ -395,6 +438,9 @@ class IptablesManager(object):
seen_lines = set()
def _weed_out_duplicates(line):
+ # ignore [packet:byte] counts at beginning of lines
+ if line.startswith('['):
+ line = line.split(']', 1)[1]
line = line.strip()
if line in seen_lines:
return False
@@ -402,11 +448,48 @@ class IptablesManager(object):
seen_lines.add(line)
return True
+ def _weed_out_removes(line):
+ # We need to find exact matches here
+ if line.startswith(':'):
+ # it's a chain, for example, ":nova-billing - [0:0]"
+ # strip off everything except the chain name
+ line = line.split(':')[1]
+ line = line.split('- [')[0]
+ line = line.strip()
+ for chain in remove_chains:
+ if chain == line:
+ remove_chains.remove(chain)
+ return False
+ elif line.startswith('['):
+ # it's a rule
+ # ignore [packet:byte] counts at beginning of lines
+ line = line.split(']', 1)[1]
+ line = line.strip()
+ for rule in remove_rules:
+ # ignore [packet:byte] counts at beginning of rules
+ rule_str = str(rule)
+ rule_str = rule_str.split(' ', 1)[1]
+ rule_str = rule_str.strip()
+ if rule_str == line:
+ remove_rules.remove(rule)
+ return False
+
+ # Leave it alone
+ return True
+
# We filter duplicates, letting the *last* occurrence take
- # precedence.
+        # precedence. We also filter out anything in the "remove"
+ # lists.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
+ new_filter = filter(_weed_out_removes, new_filter)
new_filter.reverse()
+
+ # flush lists, just in case we didn't find something
+ remove_chains.clear()
+ for rule in remove_rules:
+ remove_rules.remove(rule)
+
return new_filter
@@ -424,12 +507,19 @@ def ensure_path(path):
def metadata_forward():
"""Create forwarding rule for metadata."""
- iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ if FLAGS.metadata_host != '127.0.0.1':
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
+ else:
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ '-s 0.0.0.0/0 -d 169.254.169.254/32 '
+ '-p tcp -m tcp --dport 80 '
+ '-j REDIRECT --to-ports %s' %
+ FLAGS.metadata_port)
iptables_manager.apply()
diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py
index be8858983..6b6897156 100644
--- a/nova/network/nova_ipam_lib.py
+++ b/nova/network/nova_ipam_lib.py
@@ -16,7 +16,6 @@
# under the License.
from nova import db
-from nova import exception
from nova import ipv6
from nova.openstack.common import log as logging
diff --git a/nova/notifications.py b/nova/notifications.py
index 913d01cdd..019db3388 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -110,7 +110,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
- # do not send notification if the confitions for vm and(or) task state
+ # do not send notification if the conditions for vm and(or) task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
@@ -122,7 +122,7 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
fire_update = True
if fire_update:
- # send either a state change or a regular notificaion
+ # send either a state change or a regular notification
try:
_send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index c0b0161ff..36e5e0ab0 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -367,6 +367,11 @@ class ConfigFileValueError(Error):
pass
+def _fixpath(p):
+ """Apply tilde expansion and absolutization to a path."""
+ return os.path.abspath(os.path.expanduser(p))
+
+
def _get_config_dirs(project=None):
"""Return a list of directors where config files may be located.
@@ -384,11 +389,9 @@ def _get_config_dirs(project=None):
~/
/etc/
"""
- fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
-
cfg_dirs = [
- fix_path(os.path.join('~', '.' + project)) if project else None,
- fix_path('~'),
+ _fixpath(os.path.join('~', '.' + project)) if project else None,
+ _fixpath('~'),
os.path.join('/etc', project) if project else None,
'/etc'
]
@@ -809,7 +812,7 @@ class OptGroup(object):
if _is_opt_registered(self._opts, opt):
return False
- self._opts[opt.dest] = {'opt': opt, 'override': None, 'default': None}
+ self._opts[opt.dest] = {'opt': opt}
return True
@@ -1087,7 +1090,7 @@ class ConfigOpts(collections.Mapping):
if _is_opt_registered(self._opts, opt):
return False
- self._opts[opt.dest] = {'opt': opt, 'override': None, 'default': None}
+ self._opts[opt.dest] = {'opt': opt}
return True
@@ -1156,6 +1159,25 @@ class ConfigOpts(collections.Mapping):
for opt in opts:
self.unregister_opt(opt, group, clear_cache=False)
+ def import_opt(self, name, module_str, group=None):
+ """Import an option definition from a module.
+
+ Import a module and check that a given option is registered.
+
+ This is intended for use with global configuration objects
+ like cfg.CONF where modules commonly register options with
+ CONF at module load time. If one module requires an option
+ defined by another module it can use this method to explicitly
+ declare the dependency.
+
+ :param name: the name/dest of the opt
+ :param module_str: the name of a module to import
+ :param group: an option OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ __import__(module_str)
+ self._get_opt_info(name, group)
+
@__clear_cache
def set_override(self, name, override, group=None):
"""Override an opt value.
@@ -1186,6 +1208,33 @@ class ConfigOpts(collections.Mapping):
opt_info = self._get_opt_info(name, group)
opt_info['default'] = default
+ @__clear_cache
+ def clear_override(self, name, group=None):
+        """Clear an override of an opt value.
+
+ Clear a previously set override of the command line, config file
+ and default values of a given option.
+
+ :param name: the name/dest of the opt
+ :param group: an option OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('override', None)
+
+ @__clear_cache
+ def clear_default(self, name, group=None):
+        """Clear an override of an opt's default value.
+
+        Clear a previously set override of an option's default value.
+
+ :param name: the name/dest of the opt
+ :param group: an option OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('default', None)
+
def _all_opt_infos(self):
"""A generator function for iteration opt infos."""
for info in self._opts.values():
@@ -1202,8 +1251,8 @@ class ConfigOpts(collections.Mapping):
def _unset_defaults_and_overrides(self):
"""Unset any default or override on all options."""
for info, group in self._all_opt_infos():
- info['default'] = None
- info['override'] = None
+ info.pop('default', None)
+ info.pop('override', None)
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option.
@@ -1249,10 +1298,10 @@ class ConfigOpts(collections.Mapping):
"""
dirs = []
if self.config_dir:
- dirs.append(self.config_dir)
+ dirs.append(_fixpath(self.config_dir))
for cf in reversed(self.config_file):
- dirs.append(os.path.dirname(cf))
+ dirs.append(os.path.dirname(_fixpath(cf)))
dirs.extend(_get_config_dirs(self.project))
@@ -1326,10 +1375,10 @@ class ConfigOpts(collections.Mapping):
return self.GroupAttr(self, self._get_group(name))
info = self._get_opt_info(name, group)
- default, opt, override = [info[k] for k in sorted(info.keys())]
+ opt = info['opt']
- if override is not None:
- return override
+ if 'override' in info:
+ return info['override']
values = []
if self._cparser is not None:
@@ -1357,8 +1406,8 @@ class ConfigOpts(collections.Mapping):
if values:
return values
- if default is not None:
- return default
+ if 'default' in info:
+ return info['default']
return opt.default
@@ -1433,6 +1482,8 @@ class ConfigOpts(collections.Mapping):
config_dir_glob = os.path.join(self.config_dir, '*.conf')
config_files += sorted(glob.glob(config_dir_glob))
+ config_files = [_fixpath(p) for p in config_files]
+
self._cparser = MultiConfigParser()
try:
@@ -1450,10 +1501,10 @@ class ConfigOpts(collections.Mapping):
:raises: RequiredOptError
"""
for info, group in self._all_opt_infos():
- default, opt, override = [info[k] for k in sorted(info.keys())]
+ opt = info['opt']
if opt.required:
- if (default is not None or override is not None):
+ if ('default' in info or 'override' in info):
continue
if self._get(opt.name, group) is None:
diff --git a/nova/openstack/common/notifier/api.py b/nova/openstack/common/notifier/api.py
index f90a4eba8..95573dd65 100644
--- a/nova/openstack/common/notifier/api.py
+++ b/nova/openstack/common/notifier/api.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import inspect
import uuid
from nova.openstack.common import cfg
diff --git a/nova/openstack/common/plugin/pluginmanager.py b/nova/openstack/common/plugin/pluginmanager.py
index 29656477d..51d06d851 100644
--- a/nova/openstack/common/plugin/pluginmanager.py
+++ b/nova/openstack/common/plugin/pluginmanager.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import imp
-import os
import pkg_resources
from nova.openstack.common import cfg
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index 01db756eb..eb3416804 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -19,10 +19,8 @@
import copy
import logging
-import sys
import traceback
-from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 6f215891b..8d6c73206 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -639,26 +639,22 @@ def create_connection(conf, new=True):
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
- register_opts(conf)
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
- register_opts(conf)
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
- register_opts(conf)
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
- register_opts(conf)
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
@@ -670,7 +666,6 @@ def notify(conf, context, topic, msg, **kwargs):
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
- register_opts(conf)
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
topic.replace('.', '-')
diff --git a/nova/openstack/common/timeutils.py b/nova/openstack/common/timeutils.py
index 4416a3b19..ae300e456 100644
--- a/nova/openstack/common/timeutils.py
+++ b/nova/openstack/common/timeutils.py
@@ -106,3 +106,21 @@ def advance_time_seconds(seconds):
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
+
+
+def marshall_now(now=None):
+ """Make an rpc-safe datetime with microseconds.
+
+ Note: tzinfo is stripped, but not required for relative times."""
+ if not now:
+ now = utcnow()
+ return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+ minute=now.minute, second=now.second,
+ microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+ """Unmarshall a datetime dict."""
+ return datetime.datetime(day=tyme['day'], month=tyme['month'],
+ year=tyme['year'], hour=tyme['hour'], minute=tyme['minute'],
+ second=tyme['second'], microsecond=tyme['microsecond'])
diff --git a/nova/policy.py b/nova/policy.py
index 94bbbdd93..acfe830b9 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -92,3 +92,23 @@ def enforce(context, action, target):
policy.enforce(match_list, target, credentials,
exception.PolicyNotAuthorized, action=action)
+
+
+def check_admin_role(roles):
+ """Whether or not roles contains 'admin' role according to policy setting.
+
+ """
+ init()
+
+ action = 'admin'
+ match_list = ('rule:%s' % action,)
+ target = {}
+ credentials = {'roles': roles}
+
+ try:
+ policy.enforce(match_list, target, credentials,
+ exception.PolicyNotAuthorized, action=action)
+ except exception.PolicyNotAuthorized:
+ return False
+
+ return True
diff --git a/nova/quota.py b/nova/quota.py
index 44e3c593d..d3ba0aa02 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -101,11 +101,6 @@ class DbQuotaDriver(object):
return db.quota_get(context, project_id, resource)
- def get_by_user(self, context, user_id, project_id, resource):
- """Get a specific quota by user."""
-
- return db.quota_get_for_user(context, user_id, project_id, resource)
-
def get_by_class(self, context, quota_class, resource):
"""Get a specific quota by quota class."""
@@ -148,16 +143,16 @@ class DbQuotaDriver(object):
return quotas
- def _process_quotas(self, context, resources, project_id, quotas,
- quota_class=None, defaults=True, usages=None):
+ def get_project_quotas(self, context, resources, project_id,
+ quota_class=None, defaults=True,
+ usages=True):
"""
- Given a list of resources, process the quotas for the given
- quotas and usages.
+ Given a list of resources, retrieve the quotas for the given
+ project.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param project_id: The ID of the project to return quotas for.
- :param quotas: The quotas dictionary need to be processed.
:param quota_class: If project_id != context.project_id, the
quota class cannot be determined. This
parameter allows it to be specified. It
@@ -167,11 +162,16 @@ class DbQuotaDriver(object):
default value, if there is no value from the
quota class) will be reported if there is no
specific value for the resource.
- :param usages: If not None, the current in_use and reserved counts
+ :param usages: If True, the current in_use and reserved counts
will also be returned.
"""
- modified_quotas = {}
+ quotas = {}
+ project_quotas = db.quota_get_all_by_project(context, project_id)
+ if usages:
+ project_usages = db.quota_usage_get_all_by_project(context,
+ project_id)
+
# Get the quotas for the appropriate class. If the project ID
# matches the one in the context, we use the quota_class from
# the context, otherwise, we use the provided quota_class (if
@@ -185,11 +185,11 @@ class DbQuotaDriver(object):
for resource in resources.values():
# Omit default/quota class values
- if not defaults and resource.name not in quotas:
+ if not defaults and resource.name not in project_quotas:
continue
- modified_quotas[resource.name] = dict(
- limit=quotas.get(resource.name, class_quotas.get(
+ quotas[resource.name] = dict(
+ limit=project_quotas.get(resource.name, class_quotas.get(
resource.name, resource.default)),
)
@@ -197,96 +197,13 @@ class DbQuotaDriver(object):
# internal consumer of this interface wants to access the
# usages directly from inside a transaction.
if usages:
- usage = usages.get(resource.name, {})
- modified_quotas[resource.name].update(
+ usage = project_usages.get(resource.name, {})
+ quotas[resource.name].update(
in_use=usage.get('in_use', 0),
reserved=usage.get('reserved', 0),
)
- return modified_quotas
-
- def get_project_quotas(self, context, resources, project_id,
- quota_class=None, defaults=True,
- usages=True):
- """
- Given a list of resources, retrieve the quotas for the given
- project.
-
- :param context: The request context, for access checks.
- :param resources: A dictionary of the registered resources.
- :param project_id: The ID of the project to return quotas for.
- :param quota_class: If project_id != context.project_id, the
- quota class cannot be determined. This
- parameter allows it to be specified. It
- will be ignored if project_id ==
- context.project_id.
- :param defaults: If True, the quota class value (or the
- default value, if there is no value from the
- quota class) will be reported if there is no
- specific value for the resource.
- :param usages: If True, the current in_use and reserved counts
- will also be returned.
- """
-
- project_quotas = db.quota_get_all_by_project(context, project_id)
-
- project_usages = None
- if usages:
- project_usages = db.quota_usage_get_all_by_project(context,
- project_id)
-
- return self._process_quotas(context, resources,
- project_id, project_quotas,
- quota_class=quota_class,
- defaults=defaults,
- usages=project_usages)
-
- def get_user_quotas(self, context, resources, user_id, project_id,
- quota_class=None, defaults=True,
- usages=True):
- """
- Given a list of resources, retrieve the quotas for the given
- user.
-
- :param context: The request context, for access checks.
- :param resources: A dictionary of the registered resources.
- :param project_id: The ID of the project to return quotas for.
- :param user_id: The ID of the user to return quotas for.
- :param quota_class: If project_id != context.project_id, the
- quota class cannot be determined. This
- parameter allows it to be specified. It
- will be ignored if project_id ==
- context.project_id.
- :param defaults: If True, the quota class value (or the
- default value, if there is no value from the
- quota class) will be reported if there is no
- specific value for the resource.
- :param usages: If True, the current in_use and reserved counts
- will also be returned.
- """
-
- user_quotas = db.quota_get_all_by_user(context, user_id, project_id)
-
- user_usages = None
- if usages:
- user_usages = db.quota_usage_get_all_by_user(context,
- user_id,
- project_id)
-
- return self._process_quotas(context, resources,
- project_id, user_quotas,
- quota_class=quota_class,
- defaults=defaults,
- usages=user_usages)
-
- def get_remaining_quotas(self, context, project_id, resources):
- """Get the remaining quotas for the given project."""
- defaults = self.get_defaults(context, resources)
- quotas = db.quota_get_remaining(context, project_id)
- for key in defaults.keys():
- if key in quotas:
- defaults[key] = quotas[key]
- return defaults
+ return quotas
def _get_quotas(self, context, resources, keys, has_sync):
"""
@@ -318,10 +235,9 @@ class DbQuotaDriver(object):
raise exception.QuotaResourceUnknown(unknown=sorted(unknown))
# Grab and return the quotas (without usages)
- quotas = self.get_user_quotas(context, sub_resources,
- context.user_id,
- context.project_id,
- context.quota_class, usages=False)
+ quotas = self.get_project_quotas(context, sub_resources,
+ context.project_id,
+ context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
@@ -452,18 +368,6 @@ class DbQuotaDriver(object):
db.quota_destroy_all_by_project(context, project_id)
- def destroy_all_by_user(self, context, user_id, project_id):
- """
- Destroy all quotas, usages, and reservations associated with a
- user.
-
- :param context: The request context, for access checks.
- :param user_id: The ID of the user being deleted.
- :param project_id: The ID of the project being deleted.
- """
-
- db.quota_destroy_all_by_user(context, user_id, project_id)
-
def expire(self, context):
"""Expire reservations.
@@ -662,11 +566,6 @@ class QuotaEngine(object):
return self._driver.get_by_project(context, project_id, resource)
- def get_by_user(self, context, user_id, project_id, resource):
- """Get a specific quota by user."""
-
- return self._driver.get_by_user(context, user_id, project_id, resource)
-
def get_by_class(self, context, quota_class, resource):
"""Get a specific quota by quota class."""
@@ -712,46 +611,10 @@ class QuotaEngine(object):
"""
return self._driver.get_project_quotas(context, self._resources,
- project_id,
- quota_class=quota_class,
- defaults=defaults,
- usages=usages)
-
- def get_user_quotas(self, context, user_id, project_id,
- quota_class=None, defaults=True,
- usages=True):
- """Retrieve the quotas for the given user.
-
- :param context: The request context, for access checks.
- :param user_id: The ID of the user to return quotas for.
- :param project_id: The ID of the project to return quotas for.
- :param quota_class: If project_id != context.project_id, the
- quota class cannot be determined. This
- parameter allows it to be specified.
- :param defaults: If True, the quota class value (or the
- default value, if there is no value from the
- quota class) will be reported if there is no
- specific value for the resource.
- :param usages: If True, the current in_use and reserved counts
- will also be returned.
- """
-
- return self._driver.get_user_quotas(context, self._resources,
- user_id,
- project_id,
- quota_class=quota_class,
- defaults=defaults,
- usages=usages)
-
- def get_remaining_quotas(self, context, project_id):
- """Retrieve the remaining quotas for the given project.
-
- :param context: The request context, for access checks.
- :param project_id: The ID of the project to return quotas for.
- """
-
- return self._driver.get_remaining_quotas(context, project_id,
- self._resources)
+ project_id,
+ quota_class=quota_class,
+ defaults=defaults,
+ usages=usages)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
@@ -882,18 +745,6 @@ class QuotaEngine(object):
self._driver.destroy_all_by_project(context, project_id)
- def destroy_all_by_user(self, context, user_id, project_id):
- """
- Destroy all quotas, usages, and reservations associated with a
- user.
-
- :param context: The request context, for access checks.
- :param user_id: The ID of the user being deleted.
- :param project_id: The ID of the project being deleted.
- """
-
- self._driver.destroy_all_by_user(context, user_id, project_id)
-
def expire(self, context):
"""Expire reservations.
@@ -910,26 +761,26 @@ class QuotaEngine(object):
return sorted(self._resources.keys())
-def _sync_instances(context, user_id, project_id, session):
+def _sync_instances(context, project_id, session):
return dict(zip(('instances', 'cores', 'ram'),
- db.instance_data_get_for_user(
- context, user_id, project_id, session=session)))
+ db.instance_data_get_for_project(
+ context, project_id, session=session)))
-def _sync_volumes(context, user_id, project_id, session):
+def _sync_volumes(context, project_id, session):
return dict(zip(('volumes', 'gigabytes'),
- db.volume_data_get_for_user(
- context, user_id, project_id, session=session)))
+ db.volume_data_get_for_project(
+ context, project_id, session=session)))
-def _sync_floating_ips(context, user_id, project_id, session):
+def _sync_floating_ips(context, project_id, session):
return dict(floating_ips=db.floating_ip_count_by_project(
context, project_id, session=session))
-def _sync_security_groups(context, user_id, project_id, session):
- return dict(security_groups=db.security_group_count_by_user(
- context, user_id, project_id, session=session))
+def _sync_security_groups(context, project_id, session):
+ return dict(security_groups=db.security_group_count_by_project(
+ context, project_id, session=session))
QUOTAS = QuotaEngine()
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 20955b642..9731a0a07 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -65,6 +65,32 @@ class ChanceScheduler(driver.Scheduler):
requested_networks, is_first_time,
filter_properties, reservations):
"""Create and run an instance or instances"""
+ if 'instance_uuids' not in request_spec:
+ return self._legacy_schedule_run_instance(context, request_spec,
+ admin_password, injected_files, requested_networks,
+ is_first_time, filter_properties, reservations)
+ instances = []
+ instance_uuids = request_spec.get('instance_uuids')
+ for num, instance_uuid in enumerate(instance_uuids):
+ host = self._schedule(context, 'compute', request_spec,
+ filter_properties)
+ request_spec['instance_properties']['launch_index'] = num
+ updated_instance = driver.instance_update_db(context,
+ instance_uuid, host)
+ self.compute_rpcapi.run_instance(context,
+ instance=updated_instance, host=host,
+ requested_networks=requested_networks,
+ injected_files=injected_files,
+ admin_password=admin_password, is_first_time=is_first_time,
+ request_spec=request_spec,
+ filter_properties=filter_properties)
+ instances.append(driver.encode_instance(updated_instance))
+ return instances
+
+ def _legacy_schedule_run_instance(self, context, request_spec,
+ admin_password, injected_files,
+ requested_networks, is_first_time,
+ filter_properties, reservations):
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 3a71a6a87..7522ec6bf 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -296,7 +296,7 @@ class Scheduler(object):
def _live_migration_common_check(self, context, instance_ref, dest):
"""Live migration common check routine.
- Below checkings are followed by
+ The following checks are based on
http://wiki.libvirt.org/page/TodoPreMigrationChecks
:param context: security context
@@ -313,7 +313,7 @@ class Scheduler(object):
if orig_hypervisor != dest_hypervisor:
raise exception.InvalidHypervisorType()
- # Checkng hypervisor version.
+ # Checking hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
@@ -334,7 +334,7 @@ class Scheduler(object):
# Getting total used memory and disk of host
# It should be sum of memories that are assigned as max value,
- # because overcommiting is risky.
+ # because overcommitting is risky.
instance_refs = db.instance_get_all_by_host(context, dest)
used = sum([i['memory_mb'] for i in instance_refs])
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index f34df1568..9eea8dea6 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -61,6 +61,56 @@ class FilterScheduler(driver.Scheduler):
Returns a list of the instances created.
"""
+ if 'instance_uuids' not in request_spec:
+ return self._legacy_schedule_run_instance(context, request_spec,
+ admin_password, injected_files, requested_networks,
+ is_first_time, filter_properties, reservations)
+ elevated = context.elevated()
+ instance_uuids = request_spec.get('instance_uuids')
+ num_instances = len(instance_uuids)
+ LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+ locals())
+
+ payload = dict(request_spec=request_spec)
+ notifier.notify(context, notifier.publisher_id("scheduler"),
+ 'scheduler.run_instance.start', notifier.INFO, payload)
+
+ weighted_hosts = self._schedule(context, "compute", request_spec,
+ filter_properties, instance_uuids)
+
+ if not weighted_hosts:
+ raise exception.NoValidHost(reason="")
+
+ # NOTE(comstud): Make sure we do not pass this through. It
+ # contains an instance of RpcContext that cannot be serialized.
+ filter_properties.pop('context', None)
+
+ for num, instance_uuid in enumerate(instance_uuids):
+ if not weighted_hosts:
+ break
+ weighted_host = weighted_hosts.pop(0)
+
+ request_spec['instance_properties']['launch_index'] = num
+
+ self._provision_resource(elevated, weighted_host,
+ request_spec,
+ filter_properties,
+ requested_networks,
+ injected_files, admin_password,
+ is_first_time,
+ instance_uuid=instance_uuid)
+ # scrub retry host list in case we're scheduling multiple
+ # instances:
+ retry = filter_properties.get('retry', {})
+ retry['hosts'] = []
+
+ notifier.notify(context, notifier.publisher_id("scheduler"),
+ 'scheduler.run_instance.end', notifier.INFO, payload)
+
+ def _legacy_schedule_run_instance(self, context, request_spec,
+ admin_password, injected_files,
+ requested_networks, is_first_time,
+ filter_properties, reservations):
elevated = context.elevated()
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
@@ -89,11 +139,12 @@ class FilterScheduler(driver.Scheduler):
request_spec['instance_properties']['launch_index'] = num
instance = self._provision_resource(elevated, weighted_host,
- request_spec, reservations,
+ request_spec,
filter_properties,
requested_networks,
injected_files, admin_password,
- is_first_time)
+ is_first_time,
+ reservations=reservations)
# scrub retry host list in case we're scheduling multiple
# instances:
retry = filter_properties.get('retry', {})
@@ -117,7 +168,7 @@ class FilterScheduler(driver.Scheduler):
"""
hosts = self._schedule(context, 'compute', request_spec,
- filter_properties)
+ filter_properties, [instance['uuid']])
if not hosts:
raise exception.NoValidHost(reason="")
host = hosts.pop(0)
@@ -127,24 +178,26 @@ class FilterScheduler(driver.Scheduler):
instance_type, host.host_state.host, reservations)
def _provision_resource(self, context, weighted_host, request_spec,
- reservations, filter_properties, requested_networks,
- injected_files, admin_password, is_first_time):
+ filter_properties, requested_networks, injected_files,
+ admin_password, is_first_time, reservations=None,
+ instance_uuid=None):
"""Create the requested resource in this Zone."""
- instance = self.create_instance_db_entry(context, request_spec,
- reservations)
-
+ if reservations:
+ instance = self.create_instance_db_entry(context, request_spec,
+ reservations)
+ instance_uuid = instance['uuid']
# Add a retry entry for the selected compute host:
self._add_retry_host(filter_properties, weighted_host.host_state.host)
payload = dict(request_spec=request_spec,
weighted_host=weighted_host.to_dict(),
- instance_id=instance['uuid'])
+ instance_id=instance_uuid)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
- updated_instance = driver.instance_update_db(context, instance['uuid'],
- weighted_host.host_state.host)
+ updated_instance = driver.instance_update_db(context,
+ instance_uuid, weighted_host.host_state.host)
self.compute_rpcapi.run_instance(context, instance=updated_instance,
host=weighted_host.host_state.host,
@@ -153,17 +206,18 @@ class FilterScheduler(driver.Scheduler):
injected_files=injected_files,
admin_password=admin_password, is_first_time=is_first_time)
- inst = driver.encode_instance(updated_instance, local=True)
+ if reservations:
+ inst = driver.encode_instance(updated_instance, local=True)
- # So if another instance is created, create_instance_db_entry will
- # actually create a new entry, instead of assume it's been created
- # already
- del request_spec['instance_properties']['uuid']
+ # So if another instance is created, create_instance_db_entry will
+ # actually create a new entry, instead of assume it's been created
+ # already
+ del request_spec['instance_properties']['uuid']
- return inst
+ return inst
def _add_retry_host(self, filter_properties, host):
- """Add a retry entry for the selected computep host. In the event that
+ """Add a retry entry for the selected compute host. In the event that
the request gets re-scheduled, this entry will signal that the given
host has already been tried.
"""
@@ -212,11 +266,13 @@ class FilterScheduler(driver.Scheduler):
filter_properties['retry'] = retry
if retry['num_attempts'] > max_attempts:
- uuid = instance_properties.get('uuid', None)
- msg = _("Exceeded max scheduling attempts %d ") % max_attempts
- raise exception.NoValidHost(msg, instance_uuid=uuid)
+ instance_uuid = instance_properties.get('uuid')
+ msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
+ "instance %(instance_uuid)s") % locals()
+ raise exception.NoValidHost(reason=msg)
- def _schedule(self, context, topic, request_spec, filter_properties):
+ def _schedule(self, context, topic, request_spec, filter_properties,
+ instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
@@ -231,8 +287,13 @@ class FilterScheduler(driver.Scheduler):
cost_functions = self.get_cost_functions()
config_options = self._get_configuration_options()
- # check retry policy:
- self._populate_retry(filter_properties, instance_properties)
+ # check retry policy. Rather ugly use of instance_uuids[0]...
+ # but if we've exceeded max retries... then we really only
+ # have a single instance.
+ properties = instance_properties.copy()
+ if instance_uuids:
+ properties['uuid'] = instance_uuids[0]
+ self._populate_retry(filter_properties, properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
@@ -256,8 +317,11 @@ class FilterScheduler(driver.Scheduler):
# are being scanned in a filter or weighing function.
hosts = unfiltered_hosts_dict.itervalues()
- num_instances = request_spec.get('num_instances', 1)
selected_hosts = []
+ if instance_uuids:
+ num_instances = len(instance_uuids)
+ else:
+ num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
hosts = self.host_manager.filter_hosts(hosts,
@@ -285,7 +349,7 @@ class FilterScheduler(driver.Scheduler):
instance_properties)
selected_hosts.sort(key=operator.attrgetter('weight'))
- return selected_hosts[:num_instances]
+ return selected_hosts
def get_cost_functions(self, topic=None):
"""Returns a list of tuples containing weights and cost functions to
diff --git a/nova/scheduler/filters/aggregate_instance_extra_specs.py b/nova/scheduler/filters/aggregate_instance_extra_specs.py
index 5851909be..b08adfd66 100644
--- a/nova/scheduler/filters/aggregate_instance_extra_specs.py
+++ b/nova/scheduler/filters/aggregate_instance_extra_specs.py
@@ -15,10 +15,8 @@
# under the License.
from nova import db
-from nova import exception
from nova.openstack.common import log as logging
from nova.scheduler import filters
-from nova import utils
LOG = logging.getLogger(__name__)
diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py
index e3d18dda8..4e01a624e 100644
--- a/nova/scheduler/filters/all_hosts_filter.py
+++ b/nova/scheduler/filters/all_hosts_filter.py
@@ -18,7 +18,7 @@ from nova.scheduler import filters
class AllHostsFilter(filters.BaseHostFilter):
- """NOP host filter. Returns all hosts."""
+ """NOOP host filter. Returns all hosts."""
def host_passes(self, host_state, filter_properties):
return True
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index 3d67ccf54..b4c044370 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -12,10 +12,10 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import operator
from nova.openstack.common import log as logging
from nova.scheduler import filters
+from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
@@ -30,45 +30,10 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
if 'extra_specs' not in instance_type:
return True
- # 1. The following operations are supported:
- # =, s==, s!=, s>=, s>, s<=, s<, <in>, <or>, ==, !=, >=, <=
- # 2. Note that <or> is handled in a different way below.
- # 3. If the first word in the capability is not one of the operators,
- # it is ignored.
- op_methods = {'=': lambda x, y: float(x) >= float(y),
- '<in>': lambda x, y: y in x,
- '==': lambda x, y: float(x) == float(y),
- '!=': lambda x, y: float(x) != float(y),
- '>=': lambda x, y: float(x) >= float(y),
- '<=': lambda x, y: float(x) <= float(y),
- 's==': operator.eq,
- 's!=': operator.ne,
- 's<': operator.lt,
- 's<=': operator.le,
- 's>': operator.gt,
- 's>=': operator.ge}
-
for key, req in instance_type['extra_specs'].iteritems():
- words = req.split()
- if words:
- op = words[0]
- method = op_methods.get(op)
-
- if (op == '<or>' or method):
- cap = capabilities.get(key, None)
- if cap is None:
- return False
- if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3
- for idx in range(1, len(words), 2):
- if words[idx] == cap:
- break
- else:
- return False
- else: # method
- if len(words) == 1:
- return False
- if not method(cap, words[1]):
- return False
+ cap = capabilities.get(key, None)
+ if not extra_specs_ops.match(cap, req):
+ return False
return True
def host_passes(self, host_state, filter_properties):
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 2d7c898d6..c0ee98762 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -22,10 +22,59 @@ LOG = logging.getLogger(__name__)
class ComputeFilter(filters.BaseHostFilter):
- """Filter on active Compute nodes"""
+ """Filter on active Compute nodes that satisfy the instance properties"""
+
+ def _instance_supported(self, capabilities, instance_meta):
+ """Check if the instance is supported by the hypervisor.
+
+ The instance may specify an architecture, hypervisor, and
+ vm_mode, e.g. (x86_64, kvm, hvm).
+ """
+ inst_arch = instance_meta.get('image_architecture', None)
+ inst_h_type = instance_meta.get('image_hypervisor_type', None)
+ inst_vm_mode = instance_meta.get('image_vm_mode', None)
+ inst_props_req = (inst_arch, inst_h_type, inst_vm_mode)
+
+ # Supported if no compute-related instance properties are specified
+ if not any(inst_props_req):
+ return True
+
+ supp_instances = capabilities.get('supported_instances', None)
+ # Not supported if an instance property is requested but nothing
+ # advertised by the host.
+ if not supp_instances:
+ LOG.debug(_("Instance contains properties %(instance_meta)s, "
+ "but no corresponding capabilities are advertised "
+ "by the compute node"), locals())
+ return False
+
+ def _compare_props(props, other_props):
+ for i in props:
+ if i and i not in other_props:
+ return False
+ return True
+
+ for supp_inst in supp_instances:
+ if _compare_props(inst_props_req, supp_inst):
+ LOG.debug(_("Instance properties %(instance_meta)s "
+ "are satisfied by compute host capabilities "
+ "%(capabilities)s"), locals())
+ return True
+
+ LOG.debug(_("Instance contains properties %(instance_meta)s "
+ "that are not provided by the compute node "
+ "capabilities %(capabilities)s"), locals())
+ return False
def host_passes(self, host_state, filter_properties):
- """Returns True for only active compute nodes"""
+ """Check if host passes instance compute properties.
+
+ Returns True for active compute nodes that satisfy
+ the compute properties specified in the instance.
+ """
+ spec = filter_properties.get('request_spec', {})
+ instance_props = spec.get('instance_properties', {})
+ instance_meta = instance_props.get('system_metadata', {})
instance_type = filter_properties.get('instance_type')
if host_state.topic != 'compute' or not instance_type:
return True
@@ -40,4 +89,8 @@ class ComputeFilter(filters.BaseHostFilter):
LOG.debug(_("%(host_state)s is disabled via capabilities"),
locals())
return False
+ if not self._instance_supported(capabilities, instance_meta):
+ LOG.debug(_("%(host_state)s does not support requested "
+ "instance_properties"), locals())
+ return False
return True
diff --git a/nova/scheduler/filters/extra_specs_ops.py b/nova/scheduler/filters/extra_specs_ops.py
new file mode 100644
index 000000000..3720a2c9e
--- /dev/null
+++ b/nova/scheduler/filters/extra_specs_ops.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import operator
+
+# 1. The following operations are supported:
+# =, s==, s!=, s>=, s>, s<=, s<, <in>, <or>, ==, !=, >=, <=
+# 2. Note that <or> is handled in a different way below.
+# 3. If the first word in the capability is not one of the operators,
+# it is ignored.
+_op_methods = {'=': lambda x, y: float(x) >= float(y),
+ '<in>': lambda x, y: y in x,
+ '==': lambda x, y: float(x) == float(y),
+ '!=': lambda x, y: float(x) != float(y),
+ '>=': lambda x, y: float(x) >= float(y),
+ '<=': lambda x, y: float(x) <= float(y),
+ 's==': operator.eq,
+ 's!=': operator.ne,
+ 's<': operator.lt,
+ 's<=': operator.le,
+ 's>': operator.gt,
+ 's>=': operator.ge}
+
+
+def match(value, req):
+ words = req.split()
+
+ op = method = None
+ if words:
+ op = words.pop(0)
+ method = _op_methods.get(op)
+
+ if op != '<or>' and not method:
+ return value == req
+
+ if value is None:
+ return False
+
+ if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3
+ while True:
+ if words.pop(0) == value:
+ return True
+ if not words:
+ break
+ op = words.pop(0)
+ return False
+
+ if words and method(value, words[0]):
+ return True
+
+ return False
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 0403dffdf..fb683d91f 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -174,7 +174,7 @@ class HostState(object):
class HostManager(object):
"""Base HostManager class."""
- # Can be overriden in a subclass
+ # Can be overridden in a subclass
host_state_cls = HostState
def __init__(self):
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 41a9cf746..e1030121a 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -53,7 +53,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '1.5'
+ RPC_API_VERSION = '1.6'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -117,7 +117,7 @@ class SchedulerManager(manager.Manager):
def run_instance(self, context, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
- filter_properties, reservations, topic=None):
+ filter_properties, reservations=None, topic=None):
"""Tries to call schedule_run_instance on the driver.
Sets instance vm_state to ERROR on exceptions
"""
@@ -128,7 +128,7 @@ class SchedulerManager(manager.Manager):
reservations)
return result
except exception.NoValidHost as ex:
- # don't reraise
+ # don't re-raise
self._set_vm_state_and_notify('run_instance',
{'vm_state': vm_states.ERROR},
context, ex, request_spec)
@@ -202,7 +202,13 @@ class SchedulerManager(manager.Manager):
vm_state = updates['vm_state']
properties = request_spec.get('instance_properties', {})
- instance_uuid = properties.get('uuid', {})
+ # FIXME(comstud): We really need to move error handling closer
+ # to where the errors occur so we can deal with errors on
+ # individual instances when scheduling multiple.
+ if 'instance_uuids' in request_spec:
+ instance_uuid = request_spec['instance_uuids'][0]
+ else:
+ instance_uuid = properties.get('uuid', {})
if instance_uuid:
state = vm_state.upper()
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index 6f9d00ad2..84d0286a2 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -40,6 +40,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.3 - Remove instance_id, add instance to live_migration
1.4 - Remove update_db from prep_resize
1.5 - Add reservations argument to prep_resize()
+ 1.6 - Remove reservations argument to run_instance()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -50,15 +51,13 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
- filter_properties, reservations, call=True):
- rpc_method = self.call if call else self.cast
- return rpc_method(ctxt, self.make_msg('run_instance',
+ filter_properties):
+ return self.cast(ctxt, self.make_msg('run_instance',
request_spec=request_spec, admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
is_first_time=is_first_time,
- filter_properties=filter_properties,
- reservations=reservations), version='1.2')
+ filter_properties=filter_properties), version='1.6')
def prep_resize(self, ctxt, instance, instance_type, image,
request_spec, filter_properties, reservations):
diff --git a/nova/test.py b/nova/test.py
index da1f17387..d14d10981 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -221,7 +221,7 @@ class TestCase(unittest.TestCase):
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
- # If both values aren't convertable to float, just ignore
+ # If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index 18f99ffbb..f193f1a3c 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -22,7 +22,6 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 686548726..ea23cd5c2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -21,7 +21,6 @@ from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
-from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import test
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
index 2e06897c7..4ada22a17 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
@@ -14,13 +14,10 @@
# under the License.
import datetime
-from webob import exc
from nova.api.openstack.compute.contrib import instance_usage_audit_log as ial
-from nova.compute import utils as compute_utils
from nova import context
from nova import db
-from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py
index 795e3dcec..d59ca3164 100644
--- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py
+++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py
@@ -249,16 +249,45 @@ class KeypairsTest(test.TestCase):
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
- raise exception.KeyPairNotFound()
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(fakes.wsgi_app())
- print res
self.assertEqual(res.status_int, 404)
- def test_show(self):
+ def test_keypair_show(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ return {'name': 'foo', 'public_key': 'XXX', 'fingerprint': 'YYY'}
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual('foo', res_dict['keypair']['name'])
+ self.assertEqual('XXX', res_dict['keypair']['public_key'])
+ self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
+
+ def test_keypair_show_not_found(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
req = webob.Request.blank('/v2/fake/servers/1')
@@ -296,7 +325,6 @@ class KeypairsXMLSerializerTest(test.TestCase):
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
- print text
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
@@ -317,7 +345,6 @@ class KeypairsXMLSerializerTest(test.TestCase):
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
- print text
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
diff --git a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
index afb6d966b..c651444f4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
+++ b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
@@ -94,20 +94,3 @@ class SchedulerHintsTestCase(test.TestCase):
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
-
- def test_create_missing_server(self):
- """Test create with malformed body"""
-
- def fake_create(*args, **kwargs):
- raise Exception("Request should not reach the compute API.")
-
- self.stubs.Set(nova.compute.api.API, 'create', fake_create)
-
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'os:scheduler_hints': {'a': 'b'}}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index c37a64f8f..662a272f8 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -4750,3 +4750,30 @@ class ServerXMLSerializationTest(test.TestCase):
str(ip['version']))
self.assertEqual(str(ip_elem.get('addr')),
str(ip['addr']))
+
+
+class ServersAllExtensionsTestCase(test.TestCase):
+ """
+ Servers tests using default API router with all extensions enabled.
+ """
+
+ def setUp(self):
+ super(ServersAllExtensionsTestCase, self).setUp()
+ self.app = nova.api.openstack.compute.APIRouter()
+
+ def test_create_missing_server(self):
+ """Test create with malformed body"""
+
+ def fake_create(*args, **kwargs):
+ raise Exception("Request should not reach the compute API.")
+
+ self.stubs.Set(nova.compute.api.API, 'create', fake_create)
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(422, res.status_int)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 776bb8237..b6d775bd2 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -68,38 +68,6 @@ flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
-orig_rpc_call = rpc.call
-orig_rpc_cast = rpc.cast
-
-
-def rpc_call_wrapper(context, topic, msg, do_cast=True):
- """Stub out the scheduler creating the instance entry"""
- if (topic == FLAGS.scheduler_topic and
- msg['method'] == 'run_instance'):
- request_spec = msg['args']['request_spec']
- reservations = msg['args'].get('reservations')
- scheduler = scheduler_driver.Scheduler
- num_instances = request_spec.get('num_instances', 1)
- instances = []
- for num in xrange(num_instances):
- request_spec['instance_properties']['launch_index'] = num
- instance = scheduler().create_instance_db_entry(
- context, request_spec, reservations)
- encoded = scheduler_driver.encode_instance(instance)
- instances.append(encoded)
- return instances
- else:
- if do_cast:
- orig_rpc_cast(context, topic, msg)
- else:
- return orig_rpc_call(context, topic, msg)
-
-
-def rpc_cast_wrapper(context, topic, msg):
- """Stub out the scheduler creating the instance entry in
- the reservation_id case.
- """
- rpc_call_wrapper(context, topic, msg, do_cast=True)
def nop_report_driver_status(self):
@@ -107,7 +75,10 @@ def nop_report_driver_status(self):
class FakeSchedulerAPI(object):
- def run_instance(self, *args, **kwargs):
+
+ def run_instance(self, ctxt, request_spec, admin_password,
+ injected_files, requested_networks, is_first_time,
+ filter_properties):
pass
@@ -136,8 +107,6 @@ class BaseTestCase(test.TestCase):
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- self.stubs.Set(rpc, 'call', rpc_call_wrapper)
- self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
fake_rpcapi = FakeSchedulerAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
@@ -3930,6 +3899,15 @@ class ComputeAPITestCase(BaseTestCase):
fake_instance, fake_console_type)
self.assertEqual(console, {'url': 'fake_console_url'})
+ def test_get_vnc_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_vnc_console,
+ self.context, instance, 'novnc')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -4500,6 +4478,7 @@ class KeypairAPITestCase(BaseTestCase):
self.keypair_api = compute_api.KeypairAPI()
self.ctxt = context.RequestContext('fake', 'fake')
self._keypair_db_call_stubs()
+ self.existing_key_name = 'fake existing key name'
self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
'/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
@@ -4521,12 +4500,22 @@ class KeypairAPITestCase(BaseTestCase):
def db_key_pair_destroy(context, user_id, name):
pass
+ def db_key_pair_get(context, user_id, name):
+ if name == self.existing_key_name:
+ return {'name': self.existing_key_name,
+ 'public_key': self.pub_key,
+ 'fingerprint': self.fingerprint}
+ else:
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
+ self.stubs.Set(db, "key_pair_get",
+ db_key_pair_get)
def test_create_keypair(self):
keypair = self.keypair_api.create_key_pair(self.ctxt,
@@ -4544,13 +4533,10 @@ class KeypairAPITestCase(BaseTestCase):
self.ctxt, self.ctxt.user_id, '* BAD CHARACTERS! *')
def test_create_keypair_already_exists(self):
- def db_key_pair_get(context, user_id, name):
- pass
- self.stubs.Set(db, "key_pair_get",
- db_key_pair_get)
self.assertRaises(exception.KeyPairExists,
self.keypair_api.create_key_pair,
- self.ctxt, self.ctxt.user_id, 'foo')
+ self.ctxt, self.ctxt.user_id,
+ self.existing_key_name)
def test_create_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
@@ -4594,6 +4580,12 @@ class KeypairAPITestCase(BaseTestCase):
self.keypair_api.import_key_pair,
self.ctxt, self.ctxt.user_id, 'foo', self.pub_key)
+ def test_get_keypair(self):
+ keypair = self.keypair_api.get_key_pair(self.ctxt,
+ self.ctxt.user_id,
+ self.existing_key_name)
+ self.assertEqual(self.existing_key_name, keypair['name'])
+
class DisabledInstanceTypesTestCase(BaseTestCase):
"""
@@ -4760,10 +4752,10 @@ class ComputeReschedulingTestCase(BaseTestCase):
def test_reschedule_success(self):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
- request_spec = {'num_instances': 42}
+ request_spec = {'instance_uuids': ['foo', 'bar']}
self.assertTrue(self._reschedule(filter_properties=filter_properties,
request_spec=request_spec))
- self.assertEqual(1, request_spec['num_instances'])
+ self.assertEqual(1, len(request_spec['instance_uuids']))
class ThatsNoOrdinaryRabbitException(Exception):
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 40e1947e6..7ae692bb6 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -17,10 +17,13 @@
"""Tests For miscellaneous util methods used with compute."""
+import string
+
from nova.compute import instance_types
from nova.compute import utils as compute_utils
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -37,6 +40,97 @@ FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
+class ComputeValidateDeviceTestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeValidateDeviceTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = {
+ 'uuid': 'fake',
+ 'root_device_name': '/dev/vda',
+ 'default_ephemeral_device': '/dev/vdb'
+ }
+
+ def _validate_device(self, device=None):
+ return compute_utils.get_device_name_for_instance(self.context,
+ self.instance,
+ device)
+
+ @staticmethod
+ def _fake_bdm(device):
+ return {
+ 'device_name': device,
+ 'no_device': None,
+ 'volume_id': 'fake',
+ 'snapshot_id': None
+ }
+
+ def test_wrap(self):
+ data = []
+ for letter in string.ascii_lowercase[2:]:
+ data.append(self._fake_bdm('/dev/vd' + letter))
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: data)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdaa')
+
+ def test_wrap_plus_one(self):
+ data = []
+ for letter in string.ascii_lowercase[2:]:
+ data.append(self._fake_bdm('/dev/vd' + letter))
+ data.append(self._fake_bdm('/dev/vdaa'))
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: data)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdab')
+
+ def test_later(self):
+ data = [
+ self._fake_bdm('/dev/vdc'),
+ self._fake_bdm('/dev/vdd'),
+ self._fake_bdm('/dev/vde'),
+ ]
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: data)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdf')
+
+ def test_gap(self):
+ data = [
+ self._fake_bdm('/dev/vdc'),
+ self._fake_bdm('/dev/vde'),
+ ]
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: data)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdd')
+
+ def test_no_bdms(self):
+ data = []
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: data)
+ device = self._validate_device()
+ self.assertEqual(device, '/dev/vdc')
+
+ def test_invalid_bdms(self):
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: [])
+ self.instance['root_device_name'] = "baddata"
+ self.assertRaises(exception.InvalidDevicePath,
+ self._validate_device)
+
+ def test_invalid_device_prefix(self):
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: [])
+ self.assertRaises(exception.InvalidDevicePath,
+ self._validate_device, '/baddata/vdc')
+
+ def test_device_in_use(self):
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda context, instance: [])
+ self.assertRaises(exception.DevicePathInUse,
+ self._validate_device, '/dev/vdb')
+
+
class UsageInfoTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index e88cb2096..38849d3ed 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -232,6 +232,10 @@ class ComputeRpcAPITestCase(test.TestCase):
injected_files='files', image_ref='ref',
orig_image_ref='orig_ref', version='1.24')
+ def test_reserve_block_device_name(self):
+ self._test_compute_api('reserve_block_device_name', 'call',
+ instance=self.fake_instance, device='device', version='1.44')
+
def refresh_provider_fw_rules(self):
self._test_compute_api('refresh_provider_fw_rules', 'cast',
host='host')
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 8f9b87fa9..f1e1edc54 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -18,11 +18,14 @@
"""Tests For Console proxy."""
+from nova.console import api as console_api
+from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
+from nova.openstack.common import rpc
from nova import test
FLAGS = flags.FLAGS
@@ -30,7 +33,7 @@ flags.DECLARE('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
- """Test case for console proxy"""
+ """Test case for console proxy manager"""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
@@ -116,3 +119,68 @@ class ConsoleTestCase(test.TestCase):
self.context,
console_id)
db.instance_destroy(self.context, instance['uuid'])
+
+
+class ConsoleAPITestCase(test.TestCase):
+ """Test case for console API"""
+ def setUp(self):
+ super(ConsoleAPITestCase, self).setUp()
+
+ self.context = context.RequestContext('fake', 'fake')
+ self.console_api = console_api.API()
+ self.fake_uuid = '00000000-aaaa-bbbb-cccc-000000000000'
+ self.fake_instance = {
+ 'id': 1,
+ 'uuid': self.fake_uuid,
+ 'host': 'fake_host'
+ }
+ self.fake_console = {
+ 'pool': {'host': 'fake_host'},
+ 'id': 'fake_id'
+ }
+
+ def _fake_cast(_ctxt, _topic, _msg):
+ pass
+ self.stubs.Set(rpc, 'cast', _fake_cast)
+
+ def _fake_db_console_get(_ctxt, _console_uuid, _instance_uuid):
+ return self.fake_console
+ self.stubs.Set(db, 'console_get', _fake_db_console_get)
+
+ def _fake_db_console_get_all_by_instance(_ctxt, _instance_uuid):
+ return [self.fake_console]
+ self.stubs.Set(db, 'console_get_all_by_instance',
+ _fake_db_console_get_all_by_instance)
+
+ def _fake_instance_get_by_uuid(_ctxt, _instance_uuid):
+ return self.fake_instance
+ self.stubs.Set(db, 'instance_get_by_uuid', _fake_instance_get_by_uuid)
+
+ def test_get_consoles(self):
+ console = self.console_api.get_consoles(self.context, self.fake_uuid)
+ self.assertEqual(console, [self.fake_console])
+
+ def test_get_console(self):
+ console = self.console_api.get_console(self.context, self.fake_uuid,
+ 'fake_id')
+ self.assertEqual(console, self.fake_console)
+
+ def test_delete_console(self):
+ self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'remove_console')
+
+ console_rpcapi.ConsoleAPI.remove_console(self.context, 'fake_id')
+
+ self.mox.ReplayAll()
+
+ self.console_api.delete_console(self.context, self.fake_uuid,
+ 'fake_id')
+
+ def test_create_console(self):
+ self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI, 'add_console')
+
+ console_rpcapi.ConsoleAPI.add_console(self.context,
+ self.fake_instance['id'])
+
+ self.mox.ReplayAll()
+
+ self.console_api.create_console(self.context, self.fake_uuid)
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index aada0d7e3..f6d9496b1 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -48,3 +48,4 @@ def set_defaults(conf):
conf.set_default('api_paste_config', '$state_path/etc/nova/api-paste.ini')
conf.set_default('rpc_response_timeout', 5)
conf.set_default('rpc_cast_timeout', 5)
+ conf.set_default('lock_path', None)
diff --git a/nova/tests/hyperv/__init__.py b/nova/tests/hyperv/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/hyperv/__init__.py
diff --git a/nova/tests/hyperv/basetestcase.py b/nova/tests/hyperv/basetestcase.py
new file mode 100644
index 000000000..318cf2e28
--- /dev/null
+++ b/nova/tests/hyperv/basetestcase.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+TestCase for MockProxy based tests and related classes.
+"""
+
+import gzip
+import os
+import pickle
+
+from nova import test
+from nova.tests.hyperv import mockproxy
+
+gen_test_mocks_key = 'NOVA_GENERATE_TEST_MOCKS'
+
+
+class BaseTestCase(test.TestCase):
+ """TestCase for MockProxy based tests."""
+
+ def run(self, result=None):
+ self._currentResult = result
+ super(BaseTestCase, self).run(result)
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self._mps = {}
+
+ def tearDown(self):
+ super(BaseTestCase, self).tearDown()
+
+ has_errors = len([test for (test, msgs) in self._currentResult.errors
+ if test.id() == self.id()]) > 0
+ failed = len([test for (test, msgs) in self._currentResult.failures
+ if test.id() == self.id()]) > 0
+
+ if not has_errors and not failed:
+ self._save_mock_proxies()
+
+ def _save_mock(self, name, mock):
+ path = self._get_stub_file_path(self.id(), name)
+ pickle.dump(mock, gzip.open(path, 'wb'))
+
+ def _get_stub_file_path(self, test_name, mock_name):
+ # test naming differs between platforms
+ prefix = 'nova.tests.'
+ if test_name.startswith(prefix):
+ test_name = test_name[len(prefix):]
+ file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
+ return os.path.join(os.path.dirname(mockproxy.__file__),
+ "stubs", file_name)
+
+ def _load_mock(self, name):
+ path = self._get_stub_file_path(self.id(), name)
+ if os.path.exists(path):
+ return pickle.load(gzip.open(path, 'rb'))
+ else:
+ return None
+
+ def _load_mock_or_create_proxy(self, module_name):
+ m = None
+        if gen_test_mocks_key not in os.environ or \
+ os.environ[gen_test_mocks_key].lower() \
+ not in ['true', 'yes', '1']:
+ m = self._load_mock(module_name)
+ else:
+ module = __import__(module_name)
+ m = mockproxy.MockProxy(module)
+ self._mps[module_name] = m
+ return m
+
+ def _inject_mocks_in_modules(self, objects_to_mock, modules_to_test):
+ for module_name in objects_to_mock:
+ mp = self._load_mock_or_create_proxy(module_name)
+ for mt in modules_to_test:
+ module_local_name = module_name.split('.')[-1]
+ setattr(mt, module_local_name, mp)
+
+ def _save_mock_proxies(self):
+ for name, mp in self._mps.items():
+ m = mp.get_mock()
+ if m.has_values():
+ self._save_mock(name, m)
diff --git a/nova/tests/hyperv/db_fakes.py b/nova/tests/hyperv/db_fakes.py
new file mode 100644
index 000000000..9f5572fd1
--- /dev/null
+++ b/nova/tests/hyperv/db_fakes.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the test suite
+"""
+
+import time
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import utils
+
+
+def get_fake_instance_data(name, project_id, user_id):
+ return {'name': name,
+ 'id': 1,
+ 'uuid': utils.gen_uuid(),
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'instance_type': 'm1.tiny',
+ }
+
+
+def get_fake_image_data(project_id, user_id):
+ return {'name': 'image1',
+ 'id': 1,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'instance_type': 'm1.tiny',
+ }
+
+
+def get_fake_volume_info_data(target_portal, volume_id):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': 1,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
+ 'target_portal': target_portal,
+ 'target_lun': 1,
+            'auth_method': 'CHAP',
+            'auth_username': 'fake',
+            'auth_password': 'fake',
+ }
+}
+
+
+def get_fake_block_device_info(target_portal, volume_id):
+ return {
+ 'block_device_mapping': [{'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'data': {'target_lun': 1,
+ 'volume_id': volume_id,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-' +
+ volume_id,
+ 'target_portal': target_portal,
+ 'target_discovered': False}},
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}],
+ 'root_device_name': None,
+ 'ephemerals': [],
+ 'swap': None
+ }
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def fake_instance_create(context, values):
+ """Stubs out the db.instance_create method."""
+
+ if 'instance_type' not in values:
+ return
+
+ type_data = INSTANCE_TYPES[values['instance_type']]
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'uuid': utils.gen_uuid(),
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_ref': values['image_ref'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': values['instance_type'],
+ 'memory_mb': type_data['memory_mb'],
+ 'vcpus': type_data['vcpus'],
+ 'mac_addresses': [{'address': values['mac_address']}],
+ 'root_gb': type_data['root_gb'],
+ }
+ return FakeModel(base_options)
+
+ def fake_network_get_by_instance(context, instance_id):
+ """Stubs out the db.network_get_by_instance method."""
+
+ fields = {
+ 'bridge': 'vmnet0',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'broadcast': '10.10.10.255',
+ 'dns1': 'fake',
+ 'vlan': 100}
+ return FakeModel(fields)
+
+ def fake_instance_type_get_all(context, inactive=0, filters=None):
+ return INSTANCE_TYPES.values()
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
new file mode 100644
index 000000000..7cf9f32fe
--- /dev/null
+++ b/nova/tests/hyperv/hypervutils.py
@@ -0,0 +1,245 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Hyper-V classes to be used in testing.
+"""
+
+import sys
+import time
+
+from nova import exception
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import volumeutils
+from xml.etree import ElementTree
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+
+class HyperVUtils(object):
+ def __init__(self):
+ self.__conn = None
+ self.__conn_v2 = None
+ self.__conn_cimv2 = None
+ self.__conn_wmi = None
+ self._volumeutils = volumeutils.VolumeUtils()
+
+ @property
+ def _conn(self):
+ if self.__conn is None:
+ self.__conn = wmi.WMI(moniker='//./root/virtualization')
+ return self.__conn
+
+ @property
+ def _conn_v2(self):
+ if self.__conn_v2 is None:
+ self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
+ return self.__conn_v2
+
+ @property
+ def _conn_cimv2(self):
+ if self.__conn_cimv2 is None:
+ self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ return self.__conn_cimv2
+
+ @property
+ def _conn_wmi(self):
+ if self.__conn_wmi is None:
+ self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
+ return self.__conn_wmi
+
+ def create_vhd(self, path):
+ image_service = self._conn.query(
+ "Select * from Msvm_ImageManagementService")[0]
+ (job, ret_val) = image_service.CreateDynamicVirtualHardDisk(
+ Path=path, MaxInternalSize=3 * 1024 * 1024)
+
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise Exception('Failed to create Dynamic disk %s with error %d'
+ % (path, ret_val))
+
+ def _check_job_status(self, jobpath):
+ """Poll WMI job state for completion"""
+ job_wmi_path = jobpath.replace('\\', '/')
+ job = wmi.WMI(moniker=job_wmi_path)
+
+ while job.JobState == constants.WMI_JOB_STATE_RUNNING:
+ time.sleep(0.1)
+ job = wmi.WMI(moniker=job_wmi_path)
+ return job.JobState == constants.WMI_JOB_STATE_COMPLETED
+
+ def _get_vm(self, vm_name, conn=None):
+ if conn is None:
+ conn = self._conn
+ vml = conn.Msvm_ComputerSystem(ElementName=vm_name)
+ if not len(vml):
+ raise exception.InstanceNotFound(instance=vm_name)
+ return vml[0]
+
+ def remote_vm_exists(self, server, vm_name):
+ conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
+ return self._vm_exists(conn, vm_name)
+
+ def vm_exists(self, vm_name):
+ return self._vm_exists(self._conn, vm_name)
+
+ def _vm_exists(self, conn, vm_name):
+ return len(conn.Msvm_ComputerSystem(ElementName=vm_name)) > 0
+
+ def _get_vm_summary(self, vm_name):
+ vm = self._get_vm(vm_name)
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ vmsettings = vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ settings_paths = [v.path_() for v in vmsettings]
+ return vs_man_svc.GetSummaryInformation([100, 105],
+ settings_paths)[1][0]
+
+ def get_vm_uptime(self, vm_name):
+ return self._get_vm_summary(vm_name).UpTime
+
+ def get_vm_state(self, vm_name):
+ return self._get_vm_summary(vm_name).EnabledState
+
+ def set_vm_state(self, vm_name, req_state):
+ self._set_vm_state(self._conn, vm_name, req_state)
+
+ def _set_vm_state(self, conn, vm_name, req_state):
+ vm = self._get_vm(vm_name, conn)
+ (job, ret_val) = vm.RequestStateChange(req_state)
+
+ success = False
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ elif ret_val == 32775:
+ #Invalid state for current operation. Typically means it is
+ #already in the state requested
+ success = True
+ if not success:
+ raise Exception(_("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
+
+ def get_vm_disks(self, vm_name):
+ return self._get_vm_disks(self._conn, vm_name)
+
+ def _get_vm_disks(self, conn, vm_name):
+ vm = self._get_vm(vm_name, conn)
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+
+ disks = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
+ disk_files = []
+ for disk in disks:
+ disk_files.extend([c for c in disk.Connection])
+
+ volumes = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Physical Disk Drive']
+ volume_drives = []
+ for volume in volumes:
+ hostResources = volume.HostResource
+ drive_path = hostResources[0]
+ volume_drives.append(drive_path)
+
+ return (disk_files, volume_drives)
+
+ def remove_remote_vm(self, server, vm_name):
+ conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
+ conn_cimv2 = wmi.WMI(moniker='//' + server + '/root/cimv2')
+ self._remove_vm(vm_name, conn, conn_cimv2)
+
+ def remove_vm(self, vm_name):
+ self._remove_vm(vm_name, self._conn, self._conn_cimv2)
+
+ def _remove_vm(self, vm_name, conn, conn_cimv2):
+ vm = self._get_vm(vm_name, conn)
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ #Stop the VM first.
+ self._set_vm_state(conn, vm_name, 3)
+
+ (disk_files, volume_drives) = self._get_vm_disks(conn, vm_name)
+
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job)
+ elif ret_val == 0:
+ success = True
+ if not success:
+ raise Exception(_('Failed to destroy vm %s') % vm_name)
+
+ #Delete associated vhd disk files.
+ for disk in disk_files:
+ vhd_file = conn_cimv2.query(
+ "Select * from CIM_DataFile where Name = '" +
+ disk.replace("'", "''") + "'")[0]
+ vhd_file.Delete()
+
+ def _get_target_iqn(self, volume_id):
+ return 'iqn.2010-10.org.openstack:volume-' + volume_id
+
+ def logout_iscsi_volume_sessions(self, volume_id):
+ target_iqn = self._get_target_iqn(volume_id)
+ self._volumeutils.logout_storage_target(self._conn_wmi, target_iqn)
+
+ def iscsi_volume_sessions_exist(self, volume_id):
+ target_iqn = self._get_target_iqn(volume_id)
+ return len(self._conn_wmi.query(
+ "SELECT * FROM MSiSCSIInitiator_SessionClass \
+ WHERE TargetName='" + target_iqn + "'")) > 0
+
+ def get_vm_count(self):
+ return len(self._conn.query(
+ "Select * from Msvm_ComputerSystem where Description "
+ "<> 'Microsoft Hosting Computer System'"))
+
+ def get_vm_snapshots_count(self, vm_name):
+ return len(self._conn.query(
+ "Select * from Msvm_VirtualSystemSettingData where \
+ SettingType = 5 and SystemName = '" + vm_name + "'"))
+
+ def get_vhd_parent_path(self, vhd_path):
+
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (vhd_info, job_path, ret_val) = \
+ image_man_svc.GetVirtualHardDiskInfo(vhd_path)
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self._check_job_status(job_path)
+ else:
+ success = (ret_val == 0)
+ if not success:
+ raise Exception(_("Failed to get info for disk %s") %
+ (vhd_path))
+
+ base_disk_path = None
+ et = ElementTree.fromstring(vhd_info)
+ for item in et.findall("PROPERTY"):
+ if item.attrib["NAME"] == "ParentPath":
+ base_disk_path = item.find("VALUE").text
+ break
+
+ return base_disk_path
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
new file mode 100644
index 000000000..ff04ea709
--- /dev/null
+++ b/nova/tests/hyperv/mockproxy.py
@@ -0,0 +1,234 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes for dynamic generation of mock objects.
+"""
+
+import inspect
+
+
def serialize_obj(obj):
    """Recursively convert *obj* into a canonical string form.

    Floats are rounded to 10 decimals before conversion; dicts, lists and
    tuples are rebuilt with every element serialized, then stringified.
    Anything else is returned as str(obj).
    """
    if isinstance(obj, float):
        return str(round(obj, 10))
    if isinstance(obj, dict):
        return str(dict((k, serialize_obj(v)) for k, v in obj.items()))
    if isinstance(obj, list):
        return str([serialize_obj(item) for item in obj])
    if isinstance(obj, tuple):
        return str(tuple(serialize_obj(item) for item in obj))
    return str(obj)
+
+
def serialize_args(*args, **kwargs):
    """Serialize a call's arguments into one canonical string.

    Workaround for float string conversion issues in Python 2.6.
    """
    packed = (args, kwargs)
    return serialize_obj(packed)
+
+
class Mock(object):
    """Replays values previously recorded by a MockProxy.

    ``values`` maps a name to either a list of successive values (plain
    attribute reads and dunder results) or a dict keyed by serialized call
    parameters whose entries are lists of successive return values.
    Each access serves the next recorded value in order.
    """

    def __init__(self, values):
        self._values = values
        # Tracks how many times each name (and parameter set) was served.
        self._access_count = {}

    def _get_next_value(self, name):
        # Advance the per-name counter and return the matching recording.
        nxt = self._access_count.get(name, -1) + 1
        self._access_count[name] = nxt
        return self._values[name][nxt]

    def _get_next_ret_value(self, name, params):
        # Same as _get_next_value, but counted per serialized parameters.
        per_params = self._access_count.setdefault(name, {})
        nxt = per_params.get(params, -1) + 1
        per_params[nxt is None or params] = nxt if False else nxt
        per_params[params] = nxt
        return self._values[name][params][nxt]

    def has_values(self):
        return bool(self._values)

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            return object.__getattribute__(self, name)
        recorded = self._values[name]
        if isinstance(recorded, dict):
            # A dict of call recordings: hand back a callable that replays
            # them keyed by the serialized call arguments.
            def replay(*args, **kwargs):
                return self._get_next_ret_value(
                    name, serialize_args(args, kwargs))
            return replay
        return self._get_next_value(name)

    def __str__(self):
        return self._get_next_value('__str__')

    def __iter__(self):
        return self._get_next_value('__iter__').__iter__()

    def __len__(self):
        return self._get_next_value('__len__')

    def __getitem__(self, key):
        return self._get_next_ret_value('__getitem__', str(key))

    def __call__(self, *args, **kwargs):
        return self._get_next_ret_value('__call__',
                                        serialize_args(args, kwargs))
+
+
class MockProxy(object):
    """Transparent proxy recording every interaction with a wrapped object.

    Attribute reads, method calls, item accesses, iteration, len() and
    str() are forwarded to the wrapped object and their results recorded
    (nested in further MockProxy instances where the result itself has
    state or container semantics). get_mock() converts the recording into
    a Mock that replays those values without the real object.

    Fix: get_mock()'s plain-dict branch iterated the enclosing list
    (``v.items()``) instead of the dict element (``i.items()``), which
    raised AttributeError whenever a bare dict had been recorded.
    """

    def __init__(self, wrapped):
        self._wrapped = wrapped
        self._recorded_values = {}

    def _get_proxy_object(self, obj):
        # Wrap anything with state or container semantics so that nested
        # accesses get recorded too; plain scalars are recorded as-is.
        if hasattr(obj, '__dict__') or isinstance(obj, (tuple, list, dict)):
            return MockProxy(obj)
        return obj

    def __getattr__(self, name):
        if name in ['_wrapped']:
            return object.__getattribute__(self, name)
        attr = getattr(self._wrapped, name)
        if inspect.isfunction(attr) or inspect.ismethod(attr) or \
                inspect.isbuiltin(attr):
            def newfunc(*args, **kwargs):
                # Forward the call, then record the (proxied) result
                # keyed by the serialized call arguments.
                result = attr(*args, **kwargs)
                p = self._get_proxy_object(result)
                self._add_recorded_ret_value(
                    name, serialize_args(args, kwargs), p)
                return p
            return newfunc
        elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
                and not isinstance(attr, (str, unicode))):
            # NOTE: ``unicode`` exists on Python 2 only; this module
            # predates Python 3 support.
            p = MockProxy(attr)
        else:
            p = attr
        self._add_recorded_value(name, p)
        return p

    def __setattr__(self, name, value):
        # The proxy's own bookkeeping attributes bypass the wrapped object.
        if name in ['_wrapped', '_recorded_values']:
            object.__setattr__(self, name, value)
        else:
            setattr(self._wrapped, name, value)

    def _add_recorded_ret_value(self, name, params, val):
        """Append a call result under name -> params -> [values]."""
        d = self._recorded_values.get(name)
        if d is None:
            d = {}
            self._recorded_values[name] = d
        l = d.get(params)
        if l is None:
            l = []
            d[params] = l
        l.append(val)

    def _add_recorded_value(self, name, val):
        """Append a plain attribute/dunder result under name -> [values]."""
        if name not in self._recorded_values:
            self._recorded_values[name] = []
        self._recorded_values[name].append(val)

    def get_mock(self):
        """Build a Mock replaying every value recorded so far."""
        values = {}
        for name, recorded in self._recorded_values.items():
            if isinstance(recorded, dict):
                # Call recordings: params -> list of return values.
                d = {}
                values[name] = d
                for params, rets in recorded.items():
                    d[params] = [r.get_mock() if isinstance(r, MockProxy)
                                 else r for r in rets]
            else:
                l = []
                values[name] = l
                for item in recorded:
                    if isinstance(item, MockProxy):
                        l.append(item.get_mock())
                    elif isinstance(item, dict):
                        # BUG FIX: iterate the dict element itself
                        # (was ``v.items()``, the enclosing list, which
                        # raised AttributeError).
                        d = {}
                        for k1, v1 in item.items():
                            if isinstance(v1, MockProxy):
                                d[k1] = v1.get_mock()
                            else:
                                d[k1] = v1
                        l.append(d)
                    elif isinstance(item, list):
                        l.append([i1.get_mock() if isinstance(i1, MockProxy)
                                  else i1 for i1 in item])
                    else:
                        l.append(item)
        return Mock(values)

    def __str__(self):
        s = str(self._wrapped)
        self._add_recorded_value('__str__', s)
        return s

    def __len__(self):
        n = len(self._wrapped)
        self._add_recorded_value('__len__', n)
        return n

    def __iter__(self):
        items = [self._get_proxy_object(i) for i in self._wrapped]
        self._add_recorded_value('__iter__', items)
        return iter(items)

    def __getitem__(self, key):
        p = self._get_proxy_object(self._wrapped[key])
        self._add_recorded_ret_value('__getitem__', str(key), p)
        return p

    def __call__(self, *args, **kwargs):
        result = self._wrapped(*args, **kwargs)
        p = self._get_proxy_object(result)
        self._add_recorded_ret_value(
            '__call__', serialize_args(args, kwargs), p)
        return p
diff --git a/nova/tests/hyperv/stubs/README.rst b/nova/tests/hyperv/stubs/README.rst
new file mode 100644
index 000000000..150fd3ad1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/README.rst
@@ -0,0 +1,2 @@
+Files with extension p.gz are compressed pickle files containing serialized
+mocks used during unit testing
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
new file mode 100644
index 000000000..c65832c57
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
new file mode 100644
index 000000000..7076c4868
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
new file mode 100644
index 000000000..c251f9d6c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
new file mode 100644
index 000000000..cac08e3d0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
new file mode 100644
index 000000000..d6e624bb0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
new file mode 100644
index 000000000..bb18f7453
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
new file mode 100644
index 000000000..a5f592a74
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
new file mode 100644
index 000000000..4bebe0e72
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
new file mode 100644
index 000000000..29a610f36
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
new file mode 100644
index 000000000..ca92ece00
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
new file mode 100644
index 000000000..58269455d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
new file mode 100644
index 000000000..97cd7e62b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
new file mode 100644
index 000000000..708197430
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
new file mode 100644
index 000000000..d5eb4d746
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
new file mode 100644
index 000000000..d8c63d8ad
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
new file mode 100644
index 000000000..d0b27d201
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
new file mode 100644
index 000000000..657379cec
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
new file mode 100644
index 000000000..8bf58ef5c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
new file mode 100644
index 000000000..c20281811
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
new file mode 100644
index 000000000..a198af844
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
new file mode 100644
index 000000000..749eabe40
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
new file mode 100644
index 000000000..c40e6f995
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
new file mode 100644
index 000000000..c67dc9271
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
new file mode 100644
index 000000000..0d671fc18
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
new file mode 100644
index 000000000..66583beb1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
new file mode 100644
index 000000000..efdef819f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
new file mode 100644
index 000000000..5edd6f147
--- /dev/null
+++ b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
new file mode 100644
index 000000000..009a2d45d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
new file mode 100644
index 000000000..cb7818abb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
new file mode 100644
index 000000000..d4005b336
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
new file mode 100644
index 000000000..041d7314a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
new file mode 100644
index 000000000..cab9cd580
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
new file mode 100644
index 000000000..0dfe439ca
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
new file mode 100644
index 000000000..17f83545d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
new file mode 100644
index 000000000..1ecf26961
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
new file mode 100644
index 000000000..1c68ad11e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
new file mode 100644
index 000000000..7d4bae7a9
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
new file mode 100644
index 000000000..c1d101887
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
new file mode 100644
index 000000000..2f30402a9
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
new file mode 100644
index 000000000..578b33da7
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
new file mode 100644
index 000000000..1da1b4dd0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
new file mode 100644
index 000000000..67798704f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
new file mode 100644
index 000000000..54585f18c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
new file mode 100644
index 000000000..61ca098cb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
new file mode 100644
index 000000000..5f5a6c383
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
new file mode 100644
index 000000000..61c59ea1f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
new file mode 100644
index 000000000..91252758c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
new file mode 100644
index 000000000..b06fd1371
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
new file mode 100644
index 000000000..c6e9722c2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
new file mode 100644
index 000000000..809332508
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
new file mode 100644
index 000000000..d4b9d8921
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
new file mode 100644
index 000000000..c6124e1e0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
new file mode 100644
index 000000000..7b7110e06
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
new file mode 100644
index 000000000..6c254032c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
new file mode 100644
index 000000000..595510cff
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
new file mode 100644
index 000000000..a292ad56e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
new file mode 100644
index 000000000..bc29985bd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
new file mode 100644
index 000000000..21812b0fa
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
new file mode 100644
index 000000000..13f51b856
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
new file mode 100644
index 000000000..fca5d9328
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz
new file mode 100644
index 000000000..3ab35a29f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz
new file mode 100644
index 000000000..411c0ed07
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
new file mode 100644
index 000000000..b082714cd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
new file mode 100644
index 000000000..103f00b84
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
new file mode 100644
index 000000000..3ab274719
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
new file mode 100644
index 000000000..9d89a627d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
new file mode 100644
index 000000000..2c6fa5a22
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
new file mode 100644
index 000000000..9a54bbb62
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
new file mode 100644
index 000000000..0b6aff86d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
new file mode 100644
index 000000000..51331083e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
new file mode 100644
index 000000000..fb5e35662
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
new file mode 100644
index 000000000..d8c75ba3c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
new file mode 100644
index 000000000..92bbed9da
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
new file mode 100644
index 000000000..bb4535336
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
new file mode 100644
index 000000000..b2af3e865
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
new file mode 100644
index 000000000..293c9b3cf
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
new file mode 100644
index 000000000..b43ba2897
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
new file mode 100644
index 000000000..a1b757b60
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
new file mode 100644
index 000000000..f988eca93
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
new file mode 100644
index 000000000..4d53ded9b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
new file mode 100644
index 000000000..42e3d8f98
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
new file mode 100644
index 000000000..e7728c515
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
new file mode 100644
index 000000000..a970cc189
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
new file mode 100644
index 000000000..6b3414f25
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
new file mode 100644
index 000000000..11910aa8f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
new file mode 100644
index 000000000..a128ac167
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
new file mode 100644
index 000000000..b56c849ea
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
new file mode 100644
index 000000000..adf7b4648
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
new file mode 100644
index 000000000..907cf2e25
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
new file mode 100644
index 000000000..81877dd6e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
new file mode 100644
index 000000000..33a72e90e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
new file mode 100644
index 000000000..ff56a9287
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
new file mode 100644
index 000000000..682dd6d40
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
new file mode 100644
index 000000000..fba91bfff
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
new file mode 100644
index 000000000..1578751ee
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
new file mode 100644
index 000000000..987eeb6da
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
new file mode 100644
index 000000000..27495c884
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
new file mode 100644
index 000000000..80d62a9a4
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
new file mode 100644
index 000000000..1ad1d60dc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
new file mode 100644
index 000000000..3855ac0dd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
new file mode 100644
index 000000000..8f1d273f2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
new file mode 100644
index 000000000..927204978
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
new file mode 100644
index 000000000..849fd1c8c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
new file mode 100644
index 000000000..41aa8ccfb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
new file mode 100644
index 000000000..e69a69a20
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
new file mode 100644
index 000000000..d5aa712ac
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
new file mode 100644
index 000000000..db090ad4d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
new file mode 100644
index 000000000..ae76e5693
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
new file mode 100644
index 000000000..8e4e9bd65
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
new file mode 100644
index 000000000..991858501
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
new file mode 100644
index 000000000..f4a514e5c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
new file mode 100644
index 000000000..3916fc0fb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
new file mode 100644
index 000000000..de1f831de
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
new file mode 100644
index 000000000..751668b6f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
new file mode 100644
index 000000000..922fce900
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
new file mode 100644
index 000000000..c79c72334
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
new file mode 100644
index 000000000..3cedfe1ba
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
new file mode 100644
index 000000000..626398469
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
new file mode 100644
index 000000000..15a83ac0c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
new file mode 100644
index 000000000..755cf2e08
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
new file mode 100644
index 000000000..d14db9b2f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
new file mode 100644
index 000000000..679287e3a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
new file mode 100644
index 000000000..ed654b90e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
new file mode 100644
index 000000000..5b7ff554d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
new file mode 100644
index 000000000..d89b52377
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
new file mode 100644
index 000000000..764e6c45e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
new file mode 100644
index 000000000..a63f4881a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
new file mode 100644
index 000000000..607047b38
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
new file mode 100644
index 000000000..4f8b93282
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
new file mode 100644
index 000000000..429a96d7e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
new file mode 100644
index 000000000..ac9c25734
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
new file mode 100644
index 000000000..82b3a6185
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
new file mode 100644
index 000000000..741f28905
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
new file mode 100644
index 000000000..5c633dc73
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
new file mode 100644
index 000000000..da8c02d81
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
new file mode 100644
index 000000000..9e0baf1cd
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
new file mode 100644
index 000000000..f647f9516
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
new file mode 100644
index 000000000..cd1356e9e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
new file mode 100644
index 000000000..8add1aafc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
new file mode 100644
index 000000000..c889f9472
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
new file mode 100644
index 000000000..20a8cad07
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
new file mode 100644
index 000000000..9fec601ab
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
new file mode 100644
index 000000000..4587a6fda
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
new file mode 100644
index 000000000..48cb908c1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
new file mode 100644
index 000000000..0d15a012e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
new file mode 100644
index 000000000..b0b49c932
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
new file mode 100644
index 000000000..574ce071e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
new file mode 100644
index 000000000..c19b6e25e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
new file mode 100644
index 000000000..1d655bb02
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
new file mode 100644
index 000000000..678b4cd10
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
new file mode 100644
index 000000000..0884a350b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
new file mode 100644
index 000000000..128b20ac5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
new file mode 100644
index 000000000..bc4d4b99d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
new file mode 100644
index 000000000..8de7c4e71
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
new file mode 100644
index 000000000..ee94dd6c2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
new file mode 100644
index 000000000..313bcfa06
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
new file mode 100644
index 000000000..de8064431
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
new file mode 100644
index 000000000..e852140a1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
new file mode 100644
index 000000000..f89c63faf
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
new file mode 100644
index 000000000..12cda7550
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
new file mode 100644
index 000000000..07445af3e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
new file mode 100644
index 000000000..8e21428f2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
new file mode 100644
index 000000000..794d9a09d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
new file mode 100644
index 000000000..775f8232c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
new file mode 100644
index 000000000..d0c0306f2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
new file mode 100644
index 000000000..3cb6c4b7f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
new file mode 100644
index 000000000..a48d4aa9b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
new file mode 100644
index 000000000..5578f64f8
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
new file mode 100644
index 000000000..224ba464f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
new file mode 100644
index 000000000..29c15fe82
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
new file mode 100644
index 000000000..9ac16ec7d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
new file mode 100644
index 000000000..d6244c3fc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 1ad2b09f1..a29756caa 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -18,7 +18,6 @@
"""Tests for network API"""
from nova import context
-from nova import db
from nova import network
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/policy.json b/nova/tests/policy.json
index c2388d411..a2d948323 100644
--- a/nova/tests/policy.json
+++ b/nova/tests/policy.json
@@ -1,4 +1,5 @@
{
+ "admin": [["role:admin"], ["role:administrator"]],
"compute:create": [],
"compute:create:attach_network": [],
"compute:create:attach_volume": [],
@@ -105,8 +106,7 @@
"compute_extension:networks": [],
"compute_extension:networks:view": [],
"compute_extension:quotas:show": [],
- "compute_extension:quotas:update_for_project": [],
- "compute_extension:quotas:update_for_user": [],
+ "compute_extension:quotas:update": [],
"compute_extension:quota_classes": [],
"compute_extension:rescue": [],
"compute_extension:security_groups": [],
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 1ea57b4cf..64bf5415f 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -119,13 +119,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1
self.driver._provision_resource(
ctxt, 'host1',
- mox.Func(_has_launch_index(0)), None,
- {}, None, None, None, None).AndReturn(instance1)
+ mox.Func(_has_launch_index(0)), {},
+ None, None, None, None, reservations=None).AndReturn(instance1)
# instance 2
self.driver._provision_resource(
ctxt, 'host2',
- mox.Func(_has_launch_index(1)), None,
- {}, None, None, None, None).AndReturn(instance2)
+ mox.Func(_has_launch_index(1)), {},
+ None, None, None, None, reservations=None).AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(context_fake, request_spec,
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index ca5ca5366..d6f083576 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -394,406 +394,323 @@ class HostFiltersTestCase(test.TestCase):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_noop(self):
+ def test_compute_filter_passes_same_inst_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '1111', 'opt2': '222'}
- capabilities = {'enabled': True, 'opt1': '1', 'opt2': '2'}
+ filt_cls = self.class_map['ComputeFilter']()
+ inst_meta = {'system_metadata': {'image_architecture': 'x86_64',
+ 'image_hypervisor_type': 'kvm',
+ 'image_vm_mode': 'hvm'}}
+ req_spec = {'instance_properties': inst_meta}
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': req_spec}
+ capabilities = {'enabled': True,
+ 'supported_instances': [
+ ('x86_64', 'kvm', 'hvm')]}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_noop2(self):
+ def test_compute_filter_fails_different_inst_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '1111', 'opt2': '222'}
- capabilities = {'enabled': True, 'opt3': '1', 'opt4': '2'}
+ filt_cls = self.class_map['ComputeFilter']()
+ inst_meta = {'system_metadata': {'image_architecture': 'arm',
+ 'image_hypervisor_type': 'qemu',
+ 'image_vm_mode': 'hvm'}}
+ req_spec = {'instance_properties': inst_meta}
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': req_spec}
+ capabilities = {'enabled': True,
+ 'supported_instances': [
+ ('x86_64', 'kvm', 'hvm')]}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_noop3(self):
+ def test_compute_filter_passes_partial_inst_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '', 'opt2': ''}
- capabilities = {'enabled': True, 'opt1': '1', 'opt2': '2'}
+ filt_cls = self.class_map['ComputeFilter']()
+ inst_meta = {'system_metadata': {'image_architecture': 'x86_64',
+ 'image_vm_mode': 'hvm'}}
+ req_spec = {'instance_properties': inst_meta}
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': req_spec}
+ capabilities = {'enabled': True,
+ 'supported_instances': [
+ ('x86_64', 'kvm', 'hvm')]}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_noop4(self):
+ def test_compute_filter_fails_partial_inst_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '> 4', 'opt2': '< 3'}
- capabilities = {'enabled': True, 'opt1': '2', 'opt2': '5'}
+ filt_cls = self.class_map['ComputeFilter']()
+ inst_meta = {'system_metadata': {'image_architecture': 'x86_64',
+ 'image_vm_mode': 'hvm'}}
+ req_spec = {'instance_properties': inst_meta}
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': req_spec}
+ capabilities = {'enabled': True,
+ 'supported_instances': [
+ ('x86_64', 'xen', 'xen')]}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_with_op_eq(self):
+ def test_compute_filter_passes_without_inst_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '= 123'}
- capabilities = {'enabled': True, 'opt1': '123'}
+ filt_cls = self.class_map['ComputeFilter']()
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': {}}
+ capabilities = {'enabled': True,
+ 'supported_instances': [
+ ('x86_64', 'kvm', 'hvm')]}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_with_op_eq2(self):
+ def test_compute_filter_fails_without_host_props(self):
self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '= 123'}
- capabilities = {'enabled': True, 'opt1': '124'}
+ filt_cls = self.class_map['ComputeFilter']()
+ inst_meta = {'system_metadata': {'image_architecture': 'x86_64',
+ 'image_hypervisor_type': 'kvm',
+ 'image_vm_mode': 'hvm'}}
+ req_spec = {'instance_properties': inst_meta}
+ filter_properties = {'instance_type': {'memory_mb': 1024},
+ 'request_spec': req_spec}
+ capabilities = {'enabled': True}
service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_extra_specs_with_op_eq3(self):
+ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '= 123', 'opt2': '= 456'}
- capabilities = {'enabled': True, 'opt1': '124', 'opt2': '456'}
+ capabilities = {'enabled': True}
+ capabilities.update(ecaps)
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
+ 'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_passes_extra_specs_simple1(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': '1'},
+ passes=True)
+
+ def test_compute_filter_passes_extra_specs_simple2(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': '1', 'opt2': '2'},
+ passes=True)
+
+ def test_compute_filter_fails_extra_specs_simple1(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': '1111'},
+ passes=False)
+
+ def test_compute_filter_fails_extra_specs_simple2(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': ''},
+ passes=False)
+
+ def test_compute_filter_fails_extra_specs_simple3(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt3': '3'},
+ passes=False)
+
+ def test_compute_filter_fails_extra_specs_simple4(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': '1', 'opt2': '222'},
+ passes=False)
+
+ def test_compute_filter_fails_extra_specs_simple5(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '1', 'opt2': '2'},
+ especs={'opt1': '1111', 'opt2': '222'},
+ passes=False)
+
+ def test_compute_filter_fails_extra_specs_with_bogus_ops(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '2', 'opt2': '5'},
+ especs={'opt1': '> 4', 'opt2': '< 3'},
+ passes=False)
- def test_compute_filter_fails_extra_specs_with_op_eq(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': '= 234'}
- capabilities = {'enabled': True, 'opt2': '34'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
+ def test_compute_filter_passes_extra_specs_with_op_eq(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '123'},
+ especs={'opt1': '= 123'},
+ passes=True)
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ def test_compute_filter_passes_extra_specs_with_op_eq2(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '124'},
+ especs={'opt1': '= 123'},
+ passes=True)
- def test_compute_filter_fails_extra_specs_with_op_eq2(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '= 123', 'opt2': '= 456'}
- capabilities = {'enabled': True, 'opt1': '124', 'opt2': '4567'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_compute_filter_passes_extra_specs_with_op_eq3(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '124', 'opt2': '456'},
+ especs={'opt1': '= 123', 'opt2': '= 456'},
+ passes=True)
+
+ def test_compute_filter_fails_extra_specs_with_op_eq(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '34'},
+ especs={'opt2': '= 234'},
+ passes=False)
+
+ def test_compute_filter_passes_extra_specs_with_op_eq2(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '124', 'opt2': '4567'},
+ especs={'opt1': '= 123', 'opt2': '= 456'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_eq3(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '='}
- capabilities = {'enabled': True, 'opt1': '124'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '124'},
+ especs={'opt1': '='},
+ passes=False)
def test_compute_filter_fails_extra_specs_with_op_eq4(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt11': '= 124', 'opt12': '= 456'}
- capabilities = {'enabled': True, 'opt3': '124', 'opt4': '456'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt3': '124', 'opt4': '456'},
+ especs={'opt11': '= 124', 'opt12': '= 456'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_seq(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': 's== 123'}
- capabilities = {'enabled': True, 'opt1': '123'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '123'},
+ especs={'opt1': 's== 123'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_seq(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's== 234'}
- capabilities = {'enabled': True, 'opt2': '2345'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '2345'},
+ especs={'opt2': 's== 234'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_sneq(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': 's!= 123'}
- capabilities = {'enabled': True, 'opt1': '11'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '11'},
+ especs={'opt1': 's!= 123'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_sneq(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's!= 234'}
- capabilities = {'enabled': True, 'opt2': '234'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '234'},
+ especs={'opt2': 's!= 234'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_sgle(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': 's<= 123', 'opt2': 's>= 43'}
- capabilities = {'enabled': True, 'opt1': '11', 'opt2': '543'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '11', 'opt2': '543'},
+ especs={'opt1': 's<= 123', 'opt2': 's>= 43'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_sge(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's>= 234'}
- capabilities = {'enabled': True, 'opt2': '1000'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '1000'},
+ especs={'opt2': 's>= 234'},
+ passes=False)
def test_compute_filter_fails_extra_specs_with_op_sle(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's<= 1000'}
- capabilities = {'enabled': True, 'opt2': '234'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '234'},
+ especs={'opt2': 's<= 1000'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_sgl(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': 's< 123', 'opt2': 's> 43'}
- capabilities = {'enabled': True, 'opt1': '11', 'opt2': '543'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '11', 'opt2': '543'},
+ especs={'opt1': 's< 123', 'opt2': 's> 43'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_sl(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's< 12'}
- capabilities = {'enabled': True, 'opt2': '2'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '2'},
+ especs={'opt2': 's< 12'},
+ passes=False)
def test_compute_filter_fails_extra_specs_with_op_sg(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt2': 's> 2'}
- capabilities = {'enabled': True, 'opt2': '12'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt2': '12'},
+ especs={'opt2': 's> 2'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_in(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<in> 11'}
- capabilities = {'enabled': True, 'opt1': '12311321'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '12311321'},
+ especs={'opt1': '<in> 11'},
+ passes=True)
def test_compute_filter_passes_extra_specs_with_op_in2(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<in> 12311321'}
- capabilities = {'enabled': True, 'opt1': '12311321'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '12311321'},
+ especs={'opt1': '<in> 12311321'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_in(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<in> 11'}
- capabilities = {'enabled': True, 'opt1': '12310321'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '12310321'},
+ especs={'opt1': '<in> 11'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_or(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<or> 11 <or> 12'}
- capabilities = {'enabled': True, 'opt1': '12'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '12'},
+ especs={'opt1': '<or> 11 <or> 12'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_or(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<or> 11 <or> 12'}
- capabilities = {'enabled': True, 'opt1': '13'}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': '13'},
+ especs={'opt1': '<or> 11 <or> 12'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_le(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<= 10', 'opt2': '<= 20'}
- capabilities = {'enabled': True, 'opt1': 2, 'opt2': 2}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 2, 'opt2': 2},
+ especs={'opt1': '<= 10', 'opt2': '<= 20'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_le(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '<= 2', 'opt2': '<= 2'}
- capabilities = {'enabled': True, 'opt1': 1, 'opt2': 3}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 1, 'opt2': 3},
+ especs={'opt1': '<= 2', 'opt2': '<= 2'},
+ passes=False)
def test_compute_filter_passes_extra_specs_with_op_ge(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '>= 1', 'opt2': '>= 2'}
- capabilities = {'enabled': True, 'opt1': 2, 'opt2': 2}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 2, 'opt2': 2},
+ especs={'opt1': '>= 1', 'opt2': '>= 2'},
+ passes=True)
def test_compute_filter_fails_extra_specs_with_op_ge(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeCapabilitiesFilter']()
- extra_specs = {'opt1': '>= 2', 'opt2': '>= 2'}
- capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
- service = {'disabled': False}
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
-
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 1, 'opt2': 2},
+ especs={'opt1': '>= 2', 'opt2': '>= 2'},
+ passes=False)
def test_aggregate_filter_passes_no_extra_specs(self):
self._stub_service_is_up(True)
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 01521c65d..45ad1021b 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -16,7 +16,6 @@
Tests For HostManager
"""
-import datetime
from nova import db
from nova import exception
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index ce1fd4efe..a2fee64b2 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -43,8 +43,6 @@ class SchedulerRpcAPITestCase(test.TestCase):
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = expected_version
- if rpc_method == 'cast' and method == 'run_instance':
- kwargs['call'] = False
self.fake_args = None
self.fake_kwargs = None
@@ -64,21 +62,13 @@ class SchedulerRpcAPITestCase(test.TestCase):
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
- def test_run_instance_call(self):
- self._test_scheduler_api('run_instance', rpc_method='call',
- request_spec='fake_request_spec',
- admin_password='pw', injected_files='fake_injected_files',
- requested_networks='fake_requested_networks',
- is_first_time=True, filter_properties='fake_filter_properties',
- reservations=None, version='1.2')
-
- def test_run_instance_cast(self):
+ def test_run_instance(self):
self._test_scheduler_api('run_instance', rpc_method='cast',
request_spec='fake_request_spec',
admin_password='pw', injected_files='fake_injected_files',
requested_networks='fake_requested_networks',
is_first_time=True, filter_properties='fake_filter_properties',
- reservations=None, version='1.2')
+ version='1.6')
def test_prep_resize(self):
self._test_scheduler_api('prep_resize', rpc_method='cast',
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index c242bb62b..bbb63ab51 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -31,7 +31,6 @@ from nova import flags
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.scheduler import driver
from nova.scheduler import manager
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index b9467f258..77c9d12a6 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -18,7 +18,6 @@
import mox
import os
-import subprocess
import tempfile
from nova import test
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
index 977095910..0915bf157 100644
--- a/nova/tests/test_context.py
+++ b/nova/tests/test_context.py
@@ -26,6 +26,12 @@ class ContextTestCase(test.TestCase):
roles=['admin', 'weasel'])
self.assertEquals(ctxt.is_admin, True)
+ def test_request_context_sets_is_admin_by_role(self):
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['administrator'])
+ self.assertEquals(ctxt.is_admin, True)
+
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
new file mode 100644
index 000000000..8c4296dd3
--- /dev/null
+++ b/nova/tests/test_hypervapi.py
@@ -0,0 +1,463 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for the Hyper-V driver and related APIs.
+"""
+
+import os
+import shutil
+import sys
+import uuid
+
+from nova.compute import power_state
+from nova import context
+from nova import db
+from nova import flags
+from nova.image import glance
+from nova.tests import fake_network
+from nova.tests.hyperv import basetestcase
+from nova.tests.hyperv import db_fakes
+from nova.tests.hyperv import hypervutils
+from nova.tests.hyperv import mockproxy
+import nova.tests.image.fake as fake_image
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import driver as driver_hyperv
+from nova.virt.hyperv import vmutils
+from nova.virt import images
+
+
+class HyperVAPITestCase(basetestcase.BaseTestCase):
+ """Unit tests for Hyper-V driver calls."""
+
+ def setUp(self):
+ super(HyperVAPITestCase, self).setUp()
+
+ self._user_id = 'fake'
+ self._project_id = 'fake'
+ self._instance_data = None
+ self._image_metadata = None
+ self._dest_server = None
+ self._fetched_image = None
+ self._update_image_raise_exception = False
+ self._post_method_called = False
+ self._recover_method_called = False
+ self._volume_target_portal = '192.168.1.112:3260'
+ self._volume_id = '10958016-e196-42e3-9e7f-5d8927ae3099'
+ self._context = context.RequestContext(self._user_id, self._project_id)
+
+ self._setup_stubs()
+
+ self.flags(instances_path=r'C:\Hyper-V\test\instances',
+ vswitch_name='external')
+
+ self._hypervutils = hypervutils.HyperVUtils()
+ self._conn = driver_hyperv.HyperVDriver()
+
+ def _setup_stubs(self):
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ fake_image.stub_out_image_service(self.stubs)
+
+ def fake_fetch(context, image_id, target, user, project):
+ self._fetched_image = target
+ if not os.path.exists(target):
+ self._hypervutils.create_vhd(target)
+ self.stubs.Set(images, 'fetch', fake_fetch)
+
+ def fake_get_remote_image_service(context, name):
+ class FakeGlanceImageService(object):
+ def update(self_fake, context, image_id, image_metadata, f):
+ if self._update_image_raise_exception:
+ raise vmutils.HyperVException(
+ "Simulated update failure")
+ self._image_metadata = image_metadata
+ return (FakeGlanceImageService(), 1)
+ self.stubs.Set(glance, 'get_remote_image_service',
+ fake_get_remote_image_service)
+
+ # Modules to mock
+ modules_to_mock = [
+ 'wmi',
+ 'os',
+ 'shutil',
+ 'uuid',
+ 'time',
+ 'subprocess',
+ 'multiprocessing',
+ '_winreg'
+ ]
+
+ # Modules in which the mocks are going to be injected
+ from nova.virt.hyperv import baseops
+ from nova.virt.hyperv import livemigrationops
+ from nova.virt.hyperv import snapshotops
+ from nova.virt.hyperv import vmops
+ from nova.virt.hyperv import volumeops
+ from nova.virt.hyperv import volumeutils
+
+ modules_to_test = [
+ driver_hyperv,
+ baseops,
+ vmops,
+ vmutils,
+ volumeops,
+ volumeutils,
+ snapshotops,
+ livemigrationops,
+ hypervutils,
+ sys.modules[__name__]
+ ]
+
+ self._inject_mocks_in_modules(modules_to_mock, modules_to_test)
+
+ if isinstance(snapshotops.wmi, mockproxy.Mock):
+ from nova.virt.hyperv import ioutils
+ import StringIO
+
+ def fake_open(name, mode):
+ return StringIO.StringIO("fake file content")
+ self.stubs.Set(ioutils, 'open', fake_open)
+
+ def tearDown(self):
+ try:
+ if self._instance_data and self._hypervutils.vm_exists(
+ self._instance_data["name"]):
+ self._hypervutils.remove_vm(self._instance_data["name"])
+
+ if self._dest_server and \
+ self._hypervutils.remote_vm_exists(self._dest_server,
+ self._instance_data["name"]):
+ self._hypervutils.remove_remote_vm(self._dest_server,
+ self._instance_data["name"])
+
+ self._hypervutils.logout_iscsi_volume_sessions(self._volume_id)
+
+ shutil.rmtree(flags.FLAGS.instances_path, True)
+
+ fake_image.FakeImageService_reset()
+ finally:
+ super(HyperVAPITestCase, self).tearDown()
+
+ def test_list_instances(self):
+ num_vms = self._hypervutils.get_vm_count()
+ instances = self._conn.list_instances()
+
+ self.assertEquals(len(instances), num_vms)
+
+ def test_get_info(self):
+ self._spawn_instance(True)
+ info = self._conn.get_info(self._instance_data)
+
+ self.assertEquals(info["state"], str(power_state.RUNNING))
+
+ def test_spawn_cow_image(self):
+ self._test_spawn_instance(True)
+
+ def test_spawn_no_cow_image(self):
+ self._test_spawn_instance(False)
+
+ def test_spawn_no_vswitch_exception(self):
+ # Set flag to a non existing vswitch
+ self.flags(vswitch_name=str(uuid.uuid4()))
+ self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
+
+ self.assertFalse(self._hypervutils.vm_exists(
+ self._instance_data["name"]))
+
+ def _test_vm_state_change(self, action, from_state, to_state):
+ self._spawn_instance(True)
+ if from_state:
+ self._hypervutils.set_vm_state(self._instance_data["name"],
+ from_state)
+ action(self._instance_data)
+
+ vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
+ self.assertEquals(vmstate, to_state)
+
+ def test_pause(self):
+ self._test_vm_state_change(self._conn.pause, None,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_pause_already_paused(self):
+ self._test_vm_state_change(self._conn.pause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_unpause(self):
+ self._test_vm_state_change(self._conn.unpause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_unpause_already_running(self):
+ self._test_vm_state_change(self._conn.unpause, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_suspend(self):
+ self._test_vm_state_change(self._conn.suspend, None,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_suspend_already_suspended(self):
+ self._test_vm_state_change(self._conn.suspend,
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_resume(self):
+ self._test_vm_state_change(self._conn.resume,
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_resume_already_running(self):
+ self._test_vm_state_change(self._conn.resume, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_power_off(self):
+ self._test_vm_state_change(self._conn.power_off, None,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_already_powered_off(self):
+ self._test_vm_state_change(self._conn.suspend,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_on(self):
+ self._test_vm_state_change(self._conn.power_on,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_power_on_already_running(self):
+ self._test_vm_state_change(self._conn.power_on, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_reboot(self):
+ self._spawn_instance(True)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ spectacular=True)
+ self._conn.reboot(self._instance_data, network_info, None)
+
+ vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
+ self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_destroy(self):
+ self._spawn_instance(True)
+ (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+
+ self._conn.destroy(self._instance_data)
+
+ self.assertFalse(self._hypervutils.vm_exists(
+ self._instance_data["name"]))
+ self._instance_data = None
+
+ for vhd_path in vhd_paths:
+ self.assertFalse(os.path.exists(vhd_path))
+
+ def test_live_migration(self):
+ self.flags(limit_cpu_features=True)
+ self._spawn_instance(False)
+
+ # Existing server
+ self._dest_server = "HV12RCTest1"
+
+ self._live_migration(self._dest_server)
+
+ instance_name = self._instance_data["name"]
+ self.assertFalse(self._hypervutils.vm_exists(instance_name))
+ self.assertTrue(self._hypervutils.remote_vm_exists(self._dest_server,
+ instance_name))
+
+ self.assertTrue(self._post_method_called)
+ self.assertFalse(self._recover_method_called)
+
+ def test_live_migration_with_target_failure(self):
+ self.flags(limit_cpu_features=True)
+ self._spawn_instance(False)
+
+ dest_server = "nonexistingserver"
+
+ exception_raised = False
+ try:
+ self._live_migration(dest_server)
+ except Exception:
+ exception_raised = True
+
+ # Cannot use assertRaises with pythoncom.com_error on Linux
+ self.assertTrue(exception_raised)
+
+ instance_name = self._instance_data["name"]
+ self.assertTrue(self._hypervutils.vm_exists(instance_name))
+
+ self.assertFalse(self._post_method_called)
+ self.assertTrue(self._recover_method_called)
+
+ def _live_migration(self, dest_server):
+ def fake_post_method(context, instance_ref, dest, block_migration):
+ self._post_method_called = True
+
+ def fake_recover_method(context, instance_ref, dest, block_migration):
+ self._recover_method_called = True
+
+ self._conn.live_migration(self._context, self._instance_data,
+ dest_server, fake_post_method, fake_recover_method)
+
+ def test_pre_live_migration_cow_image(self):
+ self._test_pre_live_migration(True)
+
+ def test_pre_live_migration_no_cow_image(self):
+ self._test_pre_live_migration(False)
+
+ def _test_pre_live_migration(self, cow):
+ self.flags(use_cow_images=cow)
+
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ spectacular=True)
+ instance_data = db_fakes.get_fake_instance_data(instance_name,
+ self._project_id, self._user_id)
+ block_device_info = None
+
+ self._conn.pre_live_migration(self._context, instance_data,
+ block_device_info, network_info)
+
+ if cow:
+ self.assertTrue(not self._fetched_image is None)
+ else:
+ self.assertTrue(self._fetched_image is None)
+
+ def test_snapshot_with_update_failure(self):
+ self._spawn_instance(True)
+
+ self._update_image_raise_exception = True
+ snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+ self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
+ self._context, self._instance_data, snapshot_name)
+
+ # assert VM snapshots have been removed
+ self.assertEquals(self._hypervutils.get_vm_snapshots_count(
+ self._instance_data["name"]), 0)
+
+ def test_snapshot(self):
+ self._spawn_instance(True)
+
+ snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+ self._conn.snapshot(self._context, self._instance_data, snapshot_name)
+
+ self.assertTrue(self._image_metadata and
+ "disk_format" in self._image_metadata and
+ self._image_metadata["disk_format"] == "vhd")
+
+ # assert VM snapshots have been removed
+ self.assertEquals(self._hypervutils.get_vm_snapshots_count(
+ self._instance_data["name"]), 0)
+
+ def _spawn_instance(self, cow, block_device_info=None):
+ self.flags(use_cow_images=cow)
+
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+
+ self._instance_data = db_fakes.get_fake_instance_data(instance_name,
+ self._project_id, self._user_id)
+ instance = db.instance_create(self._context, self._instance_data)
+
+ image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ spectacular=True)
+
+ self._conn.spawn(self._context, instance, image, network_info,
+ block_device_info)
+
+ def _test_spawn_instance(self, cow):
+ self._spawn_instance(cow)
+
+ self.assertTrue(self._hypervutils.vm_exists(
+ self._instance_data["name"]))
+
+ vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
+ self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
+
+ (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(vhd_paths), 1)
+
+ parent_path = self._hypervutils.get_vhd_parent_path(vhd_paths[0])
+ if cow:
+ self.assertTrue(not parent_path is None)
+ self.assertEquals(self._fetched_image, parent_path)
+ else:
+ self.assertTrue(parent_path is None)
+ self.assertEquals(self._fetched_image, vhd_paths[0])
+
+ def _attach_volume(self):
+ self._spawn_instance(True)
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+
+ self._conn.attach_volume(connection_info,
+ self._instance_data["name"], '/dev/sdc')
+
+ def test_attach_volume(self):
+ self._attach_volume()
+
+ (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(volumes_paths), 1)
+
+ sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
+ self._volume_id)
+ self.assertTrue(sessions_exist)
+
+ def test_detach_volume(self):
+ self._attach_volume()
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+
+ self._conn.detach_volume(connection_info,
+ self._instance_data["name"], '/dev/sdc')
+
+ (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(volumes_paths), 0)
+
+ sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
+ self._volume_id)
+ self.assertFalse(sessions_exist)
+
+ def test_boot_from_volume(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ self._spawn_instance(False, block_device_info)
+
+ (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+
+ self.assertEquals(len(volumes_paths), 1)
+
+ sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
+ self._volume_id)
+ self.assertTrue(sessions_exist)
+
+ def test_attach_volume_with_target_connection_failure(self):
+ self._spawn_instance(True)
+
+ target = 'nonexistingtarget:3260'
+ connection_info = db_fakes.get_fake_volume_info_data(target,
+ self._volume_id)
+
+ self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
+ connection_info, self._instance_data["name"], '/dev/sdc')
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
index 0d7e54723..166e28a5c 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/test_iptables_network.py
@@ -35,21 +35,26 @@ class IptablesManagerTestCase(test.TestCase):
':nova-compute-local - [0:0]',
':nova-compute-OUTPUT - [0:0]',
':nova-filter-top - [0:0]',
- '-A FORWARD -j nova-filter-top ',
- '-A OUTPUT -j nova-filter-top ',
- '-A nova-filter-top -j nova-compute-local ',
- '-A INPUT -j nova-compute-INPUT ',
- '-A OUTPUT -j nova-compute-OUTPUT ',
- '-A FORWARD -j nova-compute-FORWARD ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '-A FORWARD -o virbr0 -j REJECT --reject-with '
+ '[0:0] -A FORWARD -j nova-filter-top ',
+ '[0:0] -A OUTPUT -j nova-filter-top ',
+ '[0:0] -A nova-filter-top -j nova-compute-local ',
+ '[0:0] -A INPUT -j nova-compute-INPUT ',
+ '[0:0] -A OUTPUT -j nova-compute-OUTPUT ',
+ '[0:0] -A FORWARD -j nova-compute-FORWARD ',
+ '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 53 '
+ '-j ACCEPT ',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 53 '
+ '-j ACCEPT ',
+ '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 67 '
+ '-j ACCEPT ',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 '
+ '-j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 '
+ '-j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT --reject-with '
'icmp-port-unreachable ',
- '-A FORWARD -i virbr0 -j REJECT --reject-with '
+ '[0:0] -A FORWARD -i virbr0 -j REJECT --reject-with '
'icmp-port-unreachable ',
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
@@ -66,12 +71,13 @@ class IptablesManagerTestCase(test.TestCase):
':nova-compute-PREROUTING - [0:0]',
':nova-compute-POSTROUTING - [0:0]',
':nova-postrouting-bottom - [0:0]',
- '-A PREROUTING -j nova-compute-PREROUTING ',
- '-A OUTPUT -j nova-compute-OUTPUT ',
- '-A POSTROUTING -j nova-compute-POSTROUTING ',
- '-A POSTROUTING -j nova-postrouting-bottom ',
- '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
- '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+ '[0:0] -A PREROUTING -j nova-compute-PREROUTING ',
+ '[0:0] -A OUTPUT -j nova-compute-OUTPUT ',
+ '[0:0] -A POSTROUTING -j nova-compute-POSTROUTING ',
+ '[0:0] -A POSTROUTING -j nova-postrouting-bottom ',
+ '[0:0] -A nova-postrouting-bottom -j nova-compute-SNATTING ',
+ '[0:0] -A nova-compute-SNATTING '
+ '-j nova-compute-floating-ip-snat ',
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
@@ -85,12 +91,12 @@ class IptablesManagerTestCase(test.TestCase):
table = self.manager.ipv4['filter']
table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
new_lines = self.manager._modify_rules(current_lines, table)
- self.assertTrue('-A %s-FORWARD '
+ self.assertTrue('[0:0] -A %s-FORWARD '
'-s 1.2.3.4/5 -j DROP' % self.binary_name in new_lines)
table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
new_lines = self.manager._modify_rules(current_lines, table)
- self.assertTrue('-A %s-FORWARD '
+ self.assertTrue('[0:0] -A %s-FORWARD '
'-s 1.2.3.4/5 -j DROP' % self.binary_name \
not in new_lines)
@@ -117,7 +123,7 @@ class IptablesManagerTestCase(test.TestCase):
last_postrouting_line = ''
for line in new_lines:
- if line.startswith('-A POSTROUTING'):
+ if line.startswith('[0:0] -A POSTROUTING'):
last_postrouting_line = line
self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
@@ -125,7 +131,7 @@ class IptablesManagerTestCase(test.TestCase):
"nova-postouting-bottom: %s" % last_postrouting_line)
for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
- self.assertTrue('-A %s -j %s-%s' %
+ self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
@@ -150,17 +156,17 @@ class IptablesManagerTestCase(test.TestCase):
for chain in ['FORWARD', 'OUTPUT']:
for line in new_lines:
- if line.startswith('-A %s' % chain):
+ if line.startswith('[0:0] -A %s' % chain):
self.assertTrue('-j nova-filter-top' in line,
"First %s rule does not "
"jump to nova-filter-top" % chain)
break
- self.assertTrue('-A nova-filter-top '
+ self.assertTrue('[0:0] -A nova-filter-top '
'-j %s-local' % self.binary_name in new_lines,
"nova-filter-top does not jump to wrapped local chain")
for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
- self.assertTrue('-A %s -j %s-%s' %
+ self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index b45780912..5e5a6eb2a 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -31,8 +31,6 @@ from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
-from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
@@ -2244,11 +2242,13 @@ class LibvirtConnTestCase(test.TestCase):
guest = config.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
+ guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = config.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
+ guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
@@ -2655,6 +2655,36 @@ class LibvirtConnTestCase(test.TestCase):
}
self.assertEqual(actual, expect)
+ def test_get_instance_capabilities(self):
+ conn = libvirt_driver.LibvirtDriver(True)
+
+ def get_host_capabilities_stub(self):
+ caps = config.LibvirtConfigCaps()
+
+ guest = config.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = 'x86_64'
+ guest.domtype = ['kvm', 'qemu']
+ caps.guests.append(guest)
+
+ guest = config.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = 'i686'
+ guest.domtype = ['kvm']
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ 'get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = [('x86_64', 'kvm', 'hvm'),
+ ('x86_64', 'qemu', 'hvm'),
+ ('i686', 'kvm', 'hvm')]
+ got = conn.get_instance_capabilities()
+ self.assertEqual(want, got)
+
class HostStateTestCase(test.TestCase):
@@ -2663,6 +2693,7 @@ class HostStateTestCase(test.TestCase):
'"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
'"mtrr", "sep", "apic"], '
'"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
+ instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
"""Fake connection object"""
@@ -2704,6 +2735,9 @@ class HostStateTestCase(test.TestCase):
def get_disk_available_least(self):
return 13091
+ def get_instance_capabilities(self):
+ return HostStateTestCase.instance_caps
+
def test_update_status(self):
self.mox.StubOutWithMock(libvirt_driver, 'LibvirtDriver')
libvirt_driver.LibvirtDriver(True).AndReturn(self.FakeConnection())
@@ -2787,13 +2821,15 @@ class IptablesFirewallTestCase(test.TestCase):
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
- '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
- '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
@@ -2873,18 +2909,18 @@ class IptablesFirewallTestCase(test.TestCase):
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
- if cmd == ('ip6tables-save', '-t', 'filter'):
+ if cmd == ('ip6tables-save', '-c', '-t', 'filter'):
return '\n'.join(self.in6_filter_rules), None
- if cmd == ('iptables-save', '-t', 'filter'):
+ if cmd == ('iptables-save', '-c', '-t', 'filter'):
return '\n'.join(self.in_filter_rules), None
- if cmd == ('iptables-save', '-t', 'nat'):
+ if cmd == ('iptables-save', '-c', '-t', 'nat'):
return '\n'.join(self.in_nat_rules), None
- if cmd == ('iptables-restore',):
+ if cmd == ('iptables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
return '', ''
- if cmd == ('ip6tables-restore',):
+ if cmd == ('ip6tables-restore', '-c',):
lines = process_input.split('\n')
if '*filter' in lines:
self.out6_rules = lines
@@ -2927,27 +2963,29 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
+ '-s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
- ' -s 192.168.11.0/24')
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
+ '--icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
- regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport '
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
'--dports 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
- regex = re.compile('-A .* -j ACCEPT -s %s' % ip['address'])
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
+ '%s' % ip['address'])
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"Protocol/port-less acceptance rule wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p tcp '
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
'-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index b9b67326c..13be9e056 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -27,6 +27,7 @@ import webob
from nova.api.metadata import base
from nova.api.metadata import handler
+from nova import block_device
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
@@ -183,7 +184,7 @@ class MetadataTestCase(test.TestCase):
'ebs0': '/dev/sdh'}
self.assertEqual(base._format_instance_mapping(ctxt, instance_ref0),
- base._DEFAULT_MAPPINGS)
+ block_device._DEFAULT_MAPPINGS)
self.assertEqual(base._format_instance_mapping(ctxt, instance_ref1),
expected)
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 58d52290b..10443ecde 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -108,47 +108,51 @@ class LockTestCase(test.TestCase):
def test_nested_external_works(self):
"""We can nest external syncs"""
- sentinel = object()
+ with utils.tempdir() as tempdir:
+ self.flags(lock_path=tempdir)
+ sentinel = object()
- @utils.synchronized('testlock1', external=True)
- def outer_lock():
+ @utils.synchronized('testlock1', external=True)
+ def outer_lock():
- @utils.synchronized('testlock2', external=True)
- def inner_lock():
- return sentinel
- return inner_lock()
+ @utils.synchronized('testlock2', external=True)
+ def inner_lock():
+ return sentinel
+ return inner_lock()
- self.assertEqual(sentinel, outer_lock())
+ self.assertEqual(sentinel, outer_lock())
def test_synchronized_externally(self):
"""We can lock across multiple processes"""
- rpipe1, wpipe1 = os.pipe()
- rpipe2, wpipe2 = os.pipe()
-
- @utils.synchronized('testlock1', external=True)
- def f(rpipe, wpipe):
- try:
- os.write(wpipe, "foo")
- except OSError, e:
- self.assertEquals(e.errno, errno.EPIPE)
- return
-
- rfds, _wfds, _efds = select.select([rpipe], [], [], 1)
- self.assertEquals(len(rfds), 0, "The other process, which was"
- " supposed to be locked, "
- "wrote on its end of the "
- "pipe")
- os.close(rpipe)
-
- pid = os.fork()
- if pid > 0:
- os.close(wpipe1)
- os.close(rpipe2)
-
- f(rpipe1, wpipe2)
- else:
- os.close(rpipe1)
- os.close(wpipe2)
-
- f(rpipe2, wpipe1)
- os._exit(0)
+ with utils.tempdir() as tempdir:
+ self.flags(lock_path=tempdir)
+ rpipe1, wpipe1 = os.pipe()
+ rpipe2, wpipe2 = os.pipe()
+
+ @utils.synchronized('testlock1', external=True)
+ def f(rpipe, wpipe):
+ try:
+ os.write(wpipe, "foo")
+ except OSError, e:
+ self.assertEquals(e.errno, errno.EPIPE)
+ return
+
+ rfds, _wfds, _efds = select.select([rpipe], [], [], 1)
+ self.assertEquals(len(rfds), 0, "The other process, which was"
+ " supposed to be locked, "
+ "wrote on its end of the "
+ "pipe")
+ os.close(rpipe)
+
+ pid = os.fork()
+ if pid > 0:
+ os.close(wpipe1)
+ os.close(rpipe2)
+
+ f(rpipe1, wpipe2)
+ else:
+ os.close(rpipe1)
+ os.close(wpipe2)
+
+ f(rpipe2, wpipe1)
+ os._exit(0)
diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py
index d45fd096b..af30c10d1 100644
--- a/nova/tests/test_plugin_api_extensions.py
+++ b/nova/tests/test_plugin_api_extensions.py
@@ -15,11 +15,9 @@
import pkg_resources
-import nova
from nova.api.openstack.compute import extensions as computeextensions
from nova.api.openstack import extensions
from nova.openstack.common.plugin import plugin
-from nova.openstack.common.plugin import pluginmanager
from nova import test
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index 41282005a..a85d3e25c 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -49,6 +49,11 @@ class PolicyFileTestCase(test.TestCase):
tmpfilename = os.path.join(tmpdir, 'policy')
self.flags(policy_file=tmpfilename)
+ # NOTE(uni): context construction invokes a policy check to determine
+ # is_admin or not. As a side effect, a policy reset is needed here
+ # to flush the existing policy cache.
+ policy.reset()
+
action = "example:test"
with open(tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": []}""")
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 1fcff0e4a..794c578d6 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -243,10 +243,8 @@ class FakeContext(object):
class FakeDriver(object):
- def __init__(self, by_user=None, by_project=None, by_class=None,
- reservations=None):
+ def __init__(self, by_project=None, by_class=None, reservations=None):
self.called = []
- self.by_user = by_user or {}
self.by_project = by_project or {}
self.by_class = by_class or {}
self.reservations = reservations or []
@@ -258,15 +256,6 @@ class FakeDriver(object):
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
- def get_by_user(self, context, user_id, project_id, resource):
- self.called.append(('get_by_user', context, user_id, project_id,
- resource))
- try:
- return self.by_user[user_id][resource]
- except KeyError:
- raise exception.UserQuotaNotFound(project_id=project_id,
- user_id=user_id)
-
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
@@ -290,13 +279,6 @@ class FakeDriver(object):
project_id, quota_class, defaults, usages))
return resources
- def get_user_quotas(self, context, resources, user_id, project_id,
- quota_class=None, defaults=True, usages=True):
- self.called.append(('get_user_quotas', context, resources,
- user_id, project_id, quota_class, defaults,
- usages))
- return resources
-
def limit_check(self, context, resources, values):
self.called.append(('limit_check', context, resources, values))
@@ -313,10 +295,6 @@ class FakeDriver(object):
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
- def destroy_all_by_user(self, context, user_id, project_id,):
- self.called.append(('destroy_all_by_user', context, user_id,
- project_id))
-
def expire(self, context):
self.called.append(('expire', context))
@@ -503,19 +481,6 @@ class QuotaEngineTestCase(test.TestCase):
])
self.assertEqual(result, 42)
- def test_get_by_user(self):
- context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_user=dict(
- fake_user=dict(test_resource=42)))
- quota_obj = quota.QuotaEngine(quota_driver_class=driver)
- result = quota_obj.get_by_user(context, 'fake_user',
- 'test_project', 'test_resource')
-
- self.assertEqual(driver.called, [
- ('get_by_user', context, 'fake_user', 'test_project',
- 'test_resource'), ])
- self.assertEqual(result, 42)
-
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
@@ -586,27 +551,6 @@ class QuotaEngineTestCase(test.TestCase):
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
- def test_get_user_quotas(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- result1 = quota_obj.get_user_quotas(context, 'fake_user',
- 'test_project')
- result2 = quota_obj.get_user_quotas(context, 'fake_user',
- 'test_project',
- quota_class='test_class',
- defaults=False,
- usages=False)
-
- self.assertEqual(driver.called, [
- ('get_user_quotas', context, quota_obj._resources,
- 'fake_user', 'test_project', None, True, True),
- ('get_user_quotas', context, quota_obj._resources,
- 'fake_user', 'test_project', 'test_class', False, False),
- ])
- self.assertEqual(result1, quota_obj._resources)
- self.assertEqual(result2, quota_obj._resources)
-
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
@@ -718,16 +662,6 @@ class QuotaEngineTestCase(test.TestCase):
('destroy_all_by_project', context, 'test_project'),
])
- def test_destroy_all_by_user(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.destroy_all_by_user(context, 'fake_user', 'test_project')
-
- self.assertEqual(driver.called, [
- ('destroy_all_by_user', context, 'fake_user', 'test_project'),
- ])
-
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
@@ -1212,378 +1146,8 @@ class DbQuotaDriverTestCase(test.TestCase):
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
- def _stub_get_by_user(self):
- def fake_qgabp(context, user_id, project_id):
- self.calls.append('quota_get_all_by_user')
- self.assertEqual(project_id, 'test_project')
- self.assertEqual(user_id, 'fake_user')
- return dict(
- cores=10,
- gigabytes=50,
- injected_files=2,
- injected_file_path_bytes=127,
- )
-
- def fake_qugabp(context, user_id, project_id):
- self.calls.append('quota_usage_get_all_by_user')
- self.assertEqual(project_id, 'test_project')
- self.assertEqual(user_id, 'fake_user')
- return dict(
- instances=dict(in_use=2, reserved=2),
- cores=dict(in_use=4, reserved=4),
- ram=dict(in_use=10 * 1024, reserved=0),
- volumes=dict(in_use=2, reserved=0),
- gigabytes=dict(in_use=10, reserved=0),
- floating_ips=dict(in_use=2, reserved=0),
- metadata_items=dict(in_use=0, reserved=0),
- injected_files=dict(in_use=0, reserved=0),
- injected_file_content_bytes=dict(in_use=0, reserved=0),
- injected_file_path_bytes=dict(in_use=0, reserved=0),
- )
-
- self.stubs.Set(db, 'quota_get_all_by_user', fake_qgabp)
- self.stubs.Set(db, 'quota_usage_get_all_by_user', fake_qugabp)
-
- self._stub_quota_class_get_all_by_name()
-
- def test_get_user_quotas(self):
- self._stub_get_by_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'fake_user', 'test_project')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_user',
- 'quota_usage_get_all_by_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_alt_context_no_class(self):
- self._stub_get_by_user()
- result = self.driver.get_user_quotas(
- FakeContext('other_project', 'other_class'),
- quota.QUOTAS._resources, 'fake_user', 'test_project')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_user',
- 'quota_usage_get_all_by_user',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=10,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=50 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- metadata_items=dict(
- limit=128,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=10 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_alt_context_with_class(self):
- self._stub_get_by_user()
- result = self.driver.get_user_quotas(
- FakeContext('other_project', 'other_class'),
- quota.QUOTAS._resources, 'fake_user', 'test_project',
- quota_class='test_class')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_user',
- 'quota_usage_get_all_by_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_no_defaults(self):
- self._stub_get_by_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'fake_user', 'test_project',
- defaults=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_user',
- 'quota_usage_get_all_by_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_no_usages(self):
- self._stub_get_by_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'fake_user', 'test_project',
- usages=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- ),
- cores=dict(
- limit=10,
- ),
- ram=dict(
- limit=25 * 1024,
- ),
- volumes=dict(
- limit=10,
- ),
- gigabytes=dict(
- limit=50,
- ),
- floating_ips=dict(
- limit=10,
- ),
- metadata_items=dict(
- limit=64,
- ),
- injected_files=dict(
- limit=2,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- ),
- security_groups=dict(
- limit=10,
- ),
- security_group_rules=dict(
- limit=20,
- ),
- key_pairs=dict(
- limit=100,
- ),
- ))
-
- def _stub_get_user_quotas(self):
- def fake_get_user_quotas(context, resources, user_id, project_id,
- quota_class=None, defaults=True,
- usages=True):
- self.calls.append('get_user_quotas')
- return dict((k, dict(limit=v.default))
- for k, v in resources.items())
-
- self.stubs.Set(self.driver, 'get_user_quotas',
- fake_get_user_quotas)
-
def test_get_quotas_has_sync_unknown(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
@@ -1591,7 +1155,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
@@ -1599,7 +1163,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
@@ -1607,7 +1171,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
@@ -1615,7 +1179,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
@@ -1624,7 +1188,7 @@ class DbQuotaDriverTestCase(test.TestCase):
'floating_ips', 'security_groups'],
True)
- self.assertEqual(self.calls, ['get_user_quotas'])
+ self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
@@ -1636,7 +1200,7 @@ class DbQuotaDriverTestCase(test.TestCase):
))
def test_get_quotas_no_sync(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
@@ -1645,7 +1209,7 @@ class DbQuotaDriverTestCase(test.TestCase):
'injected_file_path_bytes',
'security_group_rules'], False)
- self.assertEqual(self.calls, ['get_user_quotas'])
+ self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
@@ -1655,7 +1219,7 @@ class DbQuotaDriverTestCase(test.TestCase):
))
def test_limit_check_under(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
@@ -1663,7 +1227,7 @@ class DbQuotaDriverTestCase(test.TestCase):
dict(metadata_items=-1))
def test_limit_check_over(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
@@ -1672,13 +1236,13 @@ class DbQuotaDriverTestCase(test.TestCase):
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
@@ -1692,7 +1256,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
@@ -1702,7 +1266,7 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
@@ -1710,13 +1274,13 @@ class DbQuotaDriverTestCase(test.TestCase):
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
@@ -1724,13 +1288,13 @@ class DbQuotaDriverTestCase(test.TestCase):
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
@@ -1739,13 +1303,13 @@ class DbQuotaDriverTestCase(test.TestCase):
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
@@ -1753,13 +1317,13 @@ class DbQuotaDriverTestCase(test.TestCase):
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
@@ -1768,13 +1332,13 @@ class DbQuotaDriverTestCase(test.TestCase):
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
- self._stub_get_user_quotas()
+ self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
@@ -1783,7 +1347,7 @@ class DbQuotaDriverTestCase(test.TestCase):
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
- 'get_user_quotas',
+ 'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
@@ -1816,7 +1380,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.sync_called = set()
def make_sync(res_name):
- def sync(context, user_id, project_id, session):
+ def sync(context, project_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
@@ -1843,22 +1407,21 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_get_quota_usages(context, session):
return self.usages.copy()
- def fake_quota_usage_create(context, user_id, project_id, resource,
- in_use, reserved, until_refresh,
- session=None, save=True):
+ def fake_quota_usage_create(context, project_id, resource, in_use,
+ reserved, until_refresh, session=None,
+ save=True):
quota_usage_ref = self._make_quota_usage(
- user_id, project_id, resource, in_use, reserved,
- until_refresh, timeutils.utcnow(), timeutils.utcnow())
+ project_id, resource, in_use, reserved, until_refresh,
+ timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
- def fake_reservation_create(context, uuid, usage_id, user_id,
- project_id, resource, delta, expire,
- session=None):
+ def fake_reservation_create(context, uuid, usage_id, project_id,
+ resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
- uuid, usage_id, user_id, project_id, resource, delta, expire,
+ uuid, usage_id, project_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
@@ -1872,11 +1435,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
timeutils.set_time_override()
- def _make_quota_usage(self, user_id, project_id, resource, in_use,
- reserved, until_refresh, created_at, updated_at):
+ def _make_quota_usage(self, project_id, resource, in_use, reserved,
+ until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
- quota_usage_ref.user_id = user_id
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
@@ -1889,15 +1451,14 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
return quota_usage_ref
- def init_usage(self, user_id, project_id, resource, in_use, reserved,
+ def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
- quota_usage_ref = self._make_quota_usage(user_id, project_id,
- resource, in_use,
+ quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
created_at, updated_at)
@@ -1912,13 +1473,12 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
"%s != %s on usage for resource %s" %
(actual, value, resource))
- def _make_reservation(self, uuid, usage_id, user_id, project_id, resource,
+ def _make_reservation(self, uuid, usage_id, project_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
- reservation_ref.user_id = user_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
@@ -1965,19 +1525,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages_created, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=0,
reserved=2,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=0,
reserved=4,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=0,
reserved=2 * 1024,
@@ -1986,12 +1543,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages_created['instances'],
- user_id='fake_user',
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages_created['cores'],
- user_id='fake_user',
project_id='test_project',
delta=4),
dict(resource='ram',
@@ -2000,12 +1555,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
])
def test_quota_reserve_negative_in_use(self):
- self.init_usage('fake_user', 'test_project', 'instances', -1, 0,
- until_refresh=1)
- self.init_usage('fake_user', 'test_project', 'cores', -1, 0,
- until_refresh=1)
- self.init_usage('fake_user', 'test_project', 'ram', -1, 0,
- until_refresh=1)
+ self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
+ self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
+ self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2023,19 +1575,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
@@ -2045,12 +1594,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=4),
dict(resource='ram',
@@ -2059,12 +1606,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
])
def test_quota_reserve_until_refresh(self):
- self.init_usage('fake_user', 'test_project', 'instances', 3, 0,
- until_refresh=1)
- self.init_usage('fake_user', 'test_project', 'cores', 3, 0,
- until_refresh=1)
- self.init_usage('fake_user', 'test_project', 'ram', 3, 0,
- until_refresh=1)
+ self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
+ self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
+ self.init_usage('test_project', 'ram', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2082,19 +1626,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
@@ -2104,12 +1645,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=4),
dict(resource='ram',
@@ -2121,11 +1660,11 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
- self.init_usage('fake_user', 'test_project', 'instances', 3, 0,
+ self.init_usage('test_project', 'instances', 3, 0,
created_at=record_created, updated_at=record_created)
- self.init_usage('fake_user', 'test_project', 'cores', 3, 0,
+ self.init_usage('test_project', 'cores', 3, 0,
created_at=record_created, updated_at=record_created)
- self.init_usage('fake_user', 'test_project', 'ram', 3, 0,
+ self.init_usage('test_project', 'ram', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
quotas = dict(
@@ -2144,19 +1683,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
@@ -2166,12 +1702,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=4),
dict(resource='ram',
@@ -2180,9 +1714,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
])
def test_quota_reserve_no_refresh(self):
- self.init_usage('fake_user', 'test_project', 'instances', 3, 0)
- self.init_usage('fake_user', 'test_project', 'cores', 3, 0)
- self.init_usage('fake_user', 'test_project', 'ram', 3, 0)
+ self.init_usage('test_project', 'instances', 3, 0)
+ self.init_usage('test_project', 'cores', 3, 0)
+ self.init_usage('test_project', 'ram', 3, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2200,19 +1734,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=3,
reserved=2,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=3,
reserved=4,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=3,
reserved=2 * 1024,
@@ -2222,12 +1753,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=4),
dict(resource='ram',
@@ -2236,9 +1765,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
])
def test_quota_reserve_unders(self):
- self.init_usage('fake_user', 'test_project', 'instances', 1, 0)
- self.init_usage('fake_user', 'test_project', 'cores', 3, 0)
- self.init_usage('fake_user', 'test_project', 'ram', 1 * 1024, 0)
+ self.init_usage('test_project', 'instances', 1, 0)
+ self.init_usage('test_project', 'cores', 3, 0)
+ self.init_usage('test_project', 'ram', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2256,19 +1785,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=1,
reserved=0,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=3,
reserved=0,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=1 * 1024,
reserved=0,
@@ -2278,12 +1804,10 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=-4),
dict(resource='ram',
@@ -2292,9 +1816,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
])
def test_quota_reserve_overs(self):
- self.init_usage('fake_user', 'test_project', 'instances', 4, 0)
- self.init_usage('fake_user', 'test_project', 'cores', 8, 0)
- self.init_usage('fake_user', 'test_project', 'ram', 10 * 1024, 0)
+ self.init_usage('test_project', 'instances', 4, 0)
+ self.init_usage('test_project', 'cores', 8, 0)
+ self.init_usage('test_project', 'ram', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2314,19 +1838,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=4,
reserved=0,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=8,
reserved=0,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=10 * 1024,
reserved=0,
@@ -2336,9 +1857,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
- self.init_usage('fake_user', 'test_project', 'instances', 10, 0)
- self.init_usage('fake_user', 'test_project', 'cores', 20, 0)
- self.init_usage('fake_user', 'test_project', 'ram', 20 * 1024, 0)
+ self.init_usage('test_project', 'instances', 10, 0)
+ self.init_usage('test_project', 'cores', 20, 0)
+ self.init_usage('test_project', 'ram', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
@@ -2356,19 +1877,16 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
- user_id='fake_user',
project_id='test_project',
in_use=10,
reserved=0,
until_refresh=None),
dict(resource='cores',
- user_id='fake_user',
project_id='test_project',
in_use=20,
reserved=0,
until_refresh=None),
dict(resource='ram',
- user_id='fake_user',
project_id='test_project',
in_use=20 * 1024,
reserved=0,
@@ -2378,17 +1896,14 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
- user_id='fake_user',
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
- user_id='fake_user',
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
- user_id='fake_user',
project_id='test_project',
delta=-2 * 1024),
])
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 6aa390a11..5e357deeb 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -22,7 +22,6 @@ Unit Tests for remote procedure calls using queue
import mox
-from eventlet import greenthread
from nova import context
from nova import db
diff --git a/nova/tests/test_storwize_svc.py b/nova/tests/test_storwize_svc.py
index 15ab96130..e735bf66c 100644
--- a/nova/tests/test_storwize_svc.py
+++ b/nova/tests/test_storwize_svc.py
@@ -31,7 +31,6 @@ from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import test
-from nova import utils
from nova.volume import storwize_svc
LOG = logging.getLogger(__name__)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 898d23343..b4e1ebb3d 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -19,14 +19,11 @@ import datetime
import hashlib
import os
import os.path
-import shutil
-import socket
import StringIO
import tempfile
import eventlet
from eventlet import greenpool
-import iso8601
import mox
import nova
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 7654964a3..56ee53ee6 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -31,7 +31,6 @@ from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova.openstack.common import rpc
@@ -42,7 +41,6 @@ import nova.volume.api
QUOTAS = quota.QUOTAS
FLAGS = flags.FLAGS
-LOG = logging.getLogger(__name__)
class VolumeTestCase(test.TestCase):
@@ -297,7 +295,6 @@ class VolumeTestCase(test.TestCase):
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
- LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
@@ -517,8 +514,7 @@ class DriverTestCase(test.TestCase):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
- volumes_dir=vol_tmpdir,
- logging_default_format_string="%(message)s")
+ volumes_dir=vol_tmpdir)
self.volume = importutils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.output = ""
@@ -528,11 +524,6 @@ class DriverTestCase(test.TestCase):
return self.output, None
self.volume.driver.set_execute(_fake_execute)
- log = logging.getLogger('nova')
- self.stream = cStringIO.StringIO()
- log.logger.addHandler(logging.logging.StreamHandler(self.stream))
-
- inst = {}
instance = db.instance_create(self.context, {})
self.instance_id = instance['id']
self.instance_uuid = instance['uuid']
@@ -545,8 +536,7 @@ class DriverTestCase(test.TestCase):
super(DriverTestCase, self).tearDown()
def _attach_volume(self):
- """Attach volumes to an instance. This function also sets
- a fake log message."""
+ """Attach volumes to an instance."""
return []
def _detach_volume(self, volume_id_list):
@@ -583,8 +573,7 @@ class ISCSITestCase(DriverTestCase):
driver_name = "nova.volume.driver.ISCSIDriver"
def _attach_volume(self):
- """Attach volumes to an instance. This function also sets
- a fake log message."""
+ """Attach volumes to an instance. """
volume_id_list = []
for index in xrange(3):
vol = {}
@@ -602,13 +591,9 @@ class ISCSITestCase(DriverTestCase):
return volume_id_list
def test_check_for_export_with_no_volume(self):
- """No log message when no volume is attached to an instance."""
- self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
- self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
- """No log message when all the processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
@@ -616,10 +601,8 @@ class ISCSITestCase(DriverTestCase):
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver.tgtadm.show_target(tid)
- self.stream.truncate(0)
self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
- self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
@@ -639,8 +622,6 @@ class ISCSITestCase(DriverTestCase):
self.volume.check_for_export,
self.context,
self.instance_id)
- msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
- self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 04049ae70..c84a924e3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1499,13 +1499,15 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
- '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
- '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
@@ -1598,16 +1600,17 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
+ ' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
- ' -s 192.168.11.0/24')
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
+ ' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
- regex = re.compile('-A .* -j ACCEPT -p tcp --dport 80:81'
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
@@ -1652,7 +1655,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
- regex = re.compile('-A .* -j ACCEPT -p tcp'
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
@@ -1717,7 +1720,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
'cidr': '192.168.99.0/24'})
#validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
- regex = re.compile('-A .* -j ACCEPT -p udp --dport 200:299'
+ regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
diff --git a/nova/tests/test_xensm.py b/nova/tests/test_xensm.py
index ae9d5de97..c1748567b 100644
--- a/nova/tests/test_xensm.py
+++ b/nova/tests/test_xensm.py
@@ -16,18 +16,14 @@
"""Test suite for Xen Storage Manager Volume Driver."""
-import os
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
-from nova import test
from nova.tests.xenapi import stubs
-from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
-from nova.virt.xenapi import volume_utils
from nova.volume import xensm
LOG = logging.getLogger(__name__)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index d9c1d510d..bb31a5327 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -244,19 +244,19 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests):
else:
output = ''
process_input = args.get('process_input', None)
- if cmd == ['ip6tables-save', '-t', 'filter']:
+ if cmd == ['ip6tables-save', '-c', '-t', 'filter']:
output = '\n'.join(self._in6_filter_rules)
- if cmd == ['iptables-save', '-t', 'filter']:
+ if cmd == ['iptables-save', '-c', '-t', 'filter']:
output = '\n'.join(self._in_filter_rules)
- if cmd == ['iptables-save', '-t', 'nat']:
+ if cmd == ['iptables-save', '-c', '-t', 'nat']:
output = '\n'.join(self._in_nat_rules)
- if cmd == ['iptables-restore', ]:
+ if cmd == ['iptables-restore', '-c', ]:
lines = process_input.split('\n')
if '*filter' in lines:
if self._test_case is not None:
self._test_case._out_rules = lines
output = '\n'.join(lines)
- if cmd == ['ip6tables-restore', ]:
+ if cmd == ['ip6tables-restore', '-c', ]:
lines = process_input.split('\n')
if '*filter' in lines:
output = '\n'.join(lines)
diff --git a/nova/utils.py b/nova/utils.py
index 762708bc4..a14263eaf 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -35,13 +35,11 @@ import socket
import struct
import sys
import tempfile
-import threading
import time
import uuid
import weakref
from xml.sax import saxutils
-from eventlet import corolocal
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
@@ -628,7 +626,7 @@ class _InterProcessLock(object):
self.unlock()
self.lockfile.close()
except IOError:
- LOG.exception(_("Could not release the aquired lock `%s`")
+ LOG.exception(_("Could not release the acquired lock `%s`")
% self.fname)
def trylock(self):
@@ -717,14 +715,21 @@ def synchronized(name, external=False):
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
'method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
- lock_file_path = os.path.join(FLAGS.lock_path,
- 'nova-%s' % name)
+ lock_path = FLAGS.lock_path or tempfile.mkdtemp()
+ lock_file_path = os.path.join(lock_path, 'nova-%s' % name)
lock = InterProcessLock(lock_file_path)
- with lock:
- LOG.debug(_('Got file lock "%(lock)s" for '
- 'method "%(method)s"...'),
- {'lock': name, 'method': f.__name__})
- retval = f(*args, **kwargs)
+ try:
+ with lock:
+ LOG.debug(_('Got file lock "%(lock)s" for '
+ 'method "%(method)s"...'),
+ {'lock': name, 'method': f.__name__})
+ retval = f(*args, **kwargs)
+ finally:
+ # NOTE(vish): This removes the tempdir if we needed
+ # to create one. This is used to cleanup
+ # the locks left behind by unit tests.
+ if not FLAGS.lock_path:
+ shutil.rmtree(lock_path)
else:
retval = f(*args, **kwargs)
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 0c3d2978e..11b9cd1c0 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -17,13 +17,10 @@
"""Config Drive v2 helper."""
-import base64
-import json
import os
import shutil
import tempfile
-from nova.api.metadata import base as instance_metadata
from nova import exception
from nova import flags
from nova.openstack.common import cfg
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 53b37ada6..dd3646bc9 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -22,7 +22,6 @@ Driver base-classes:
types that support that contract
"""
-from nova.compute import power_state
from nova import flags
from nova.openstack.common import log as logging
@@ -193,7 +192,8 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
@@ -302,7 +302,8 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def resume_state_on_host_boot(self, context, instance, network_info):
+ def resume_state_on_host_boot(self, context, instance, network_info,
+ block_device_info=None):
"""resume guest state when a host is booted"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 2d4672876..dd20b0b15 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -29,7 +29,6 @@ from nova.compute import power_state
from nova import db
from nova import exception
from nova.openstack.common import log as logging
-from nova import utils
from nova.virt import driver
@@ -93,7 +92,8 @@ class FakeDriver(driver.ComputeDriver):
if not instance['name'] in self.instances:
raise exception.InstanceNotRunning()
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
pass
@staticmethod
@@ -106,7 +106,8 @@ class FakeDriver(driver.ComputeDriver):
def inject_file(self, instance, b64_path, b64_contents):
pass
- def resume_state_on_host_boot(self, context, instance, network_info):
+ def resume_state_on_host_boot(self, context, instance, network_info,
+ block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
diff --git a/nova/virt/hyperv/README.rst b/nova/virt/hyperv/README.rst
new file mode 100644
index 000000000..c0609f310
--- /dev/null
+++ b/nova/virt/hyperv/README.rst
@@ -0,0 +1,44 @@
+Hyper-V Volumes Management
+=============================================
+
+To enable the volume features, the first thing that needs to be done is to
+enable the iSCSI service on the Windows compute nodes and set it to start
+automatically.
+
+sc config msiscsi start= auto
+net start msiscsi
+
+In Windows Server 2012, it's important to execute the following commands to
+prevent the volumes from being online by default:
+
+diskpart
+san policy=OfflineAll
+exit
+
+How to check if your iSCSI configuration is working properly:
+
+On your OpenStack controller:
+
+1. Create a volume with e.g. "nova volume-create 1" and note the generated
+volume id
+
+On Windows:
+
+2. iscsicli QAddTargetPortal <your_iSCSI_target>
+3. iscsicli ListTargets
+
+The output should contain the iqn related to your volume:
+iqn.2010-10.org.openstack:volume-<volume_id>
+
+How to test Boot from volume in Hyper-V from the OpenStack dashboard:
+
+1. First of all create a volume
+2. Get the volume ID of the created volume
+3. Upload and untar the following VHD image to the cloud controller:
+http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz
+4. sudo dd if=/path/to/vhdfileofstep3
+of=/dev/nova-volumes/volume-XXXXX <- Related to the ID of step 2
+5. Launch an instance from any image (this is not important because we are
+just booting from a volume) from the dashboard, and don't forget to select
+boot from volume and select the volume created in step 2. Important: Device
+name must be "vda".
diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/virt/hyperv/__init__.py
diff --git a/nova/virt/hyperv/baseops.py b/nova/virt/hyperv/baseops.py
new file mode 100644
index 000000000..3d941a854
--- /dev/null
+++ b/nova/virt/hyperv/baseops.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management base class for Hyper-V operations.
+"""
+import sys
+
+from nova.openstack.common import log as logging
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseOps(object):
+ def __init__(self):
+ self.__conn = None
+ self.__conn_v2 = None
+ self.__conn_cimv2 = None
+ self.__conn_wmi = None
+
+ @property
+ def _conn(self):
+ if self.__conn is None:
+ self.__conn = wmi.WMI(moniker='//./root/virtualization')
+ return self.__conn
+
+ @property
+ def _conn_v2(self):
+ if self.__conn_v2 is None:
+ self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
+ return self.__conn_v2
+
+ @property
+ def _conn_cimv2(self):
+ if self.__conn_cimv2 is None:
+ self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+ return self.__conn_cimv2
+
+ @property
+ def _conn_wmi(self):
+ if self.__conn_wmi is None:
+ self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
+ return self.__conn_wmi
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
new file mode 100644
index 000000000..392dcfa13
--- /dev/null
+++ b/nova/virt/hyperv/constants.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Constants used in ops classes
+"""
+
+from nova.compute import power_state
+
+HYPERV_VM_STATE_ENABLED = 2
+HYPERV_VM_STATE_DISABLED = 3
+HYPERV_VM_STATE_REBOOT = 10
+HYPERV_VM_STATE_RESET = 11
+HYPERV_VM_STATE_PAUSED = 32768
+HYPERV_VM_STATE_SUSPENDED = 32769
+
+HYPERV_POWER_STATE = {
+ HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
+ HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
+ HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
+ HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
+}
+
+REQ_POWER_STATE = {
+ 'Enabled': HYPERV_VM_STATE_ENABLED,
+ 'Disabled': HYPERV_VM_STATE_DISABLED,
+ 'Reboot': HYPERV_VM_STATE_REBOOT,
+ 'Reset': HYPERV_VM_STATE_RESET,
+ 'Paused': HYPERV_VM_STATE_PAUSED,
+ 'Suspended': HYPERV_VM_STATE_SUSPENDED,
+}
+
+WMI_JOB_STATUS_STARTED = 4096
+WMI_JOB_STATE_RUNNING = 4
+WMI_JOB_STATE_COMPLETED = 7
+
+VM_SUMMARY_NUM_PROCS = 4
+VM_SUMMARY_ENABLED_STATE = 100
+VM_SUMMARY_MEMORY_USAGE = 103
+VM_SUMMARY_UPTIME = 105
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
new file mode 100644
index 000000000..85ff50e95
--- /dev/null
+++ b/nova/virt/hyperv/driver.py
@@ -0,0 +1,227 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Cloud.com, Inc
+# Copyright (c) 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to Hyper-V.
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V
+Hyper-V WMI usage:
+ http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
+The Hyper-V object model briefly:
+ The physical computer and its hosted virtual machines are each represented
+ by the Msvm_ComputerSystem class.
+
+ Each virtual machine is associated with a
+ Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
+ Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
+ there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
+ The rasd objects describe the settings for each device in a VM.
+ Together, the vs_gs_data, vmsettings and rasds describe the configuration
+ of the virtual machine.
+
+ Creating new resources such as disks and nics involves cloning a default
+ rasd object and appropriately modifying the clone and calling the
+ AddVirtualSystemResources WMI method
+ Changing resources such as memory uses the ModifyVirtualSystemResources
+ WMI method
+
+Using the Python WMI library:
+ Tutorial:
+ http://timgolden.me.uk/python/wmi/tutorial.html
+ Hyper-V WMI objects can be retrieved simply by using the class name
+ of the WMI object and optionally specifying a column to filter the
+ result set. More complex filters can be formed using WQL (sql-like)
+ queries.
+ The parameters and return tuples of WMI method calls can gleaned by
+ examining the doc string. For example:
+ >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
+ ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
+ => (Job, ReturnValue)'
+ When passing setting data (ResourceSettingData) to the WMI method,
+ an XML representation of the data is passed in using GetText_(1).
+ Available methods on a service can be determined using method.keys():
+ >>> vs_man_svc.methods.keys()
+ vmsettings and rasds for a vm can be retrieved using the 'associators'
+ method with the appropriate return class.
+ Long running WMI commands generally return a Job (an instance of
+ Msvm_ConcreteJob) whose state can be polled to determine when it finishes
+
+"""
+
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import livemigrationops
+from nova.virt.hyperv import snapshotops
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import volumeops
+
+LOG = logging.getLogger(__name__)
+
+
+class HyperVDriver(driver.ComputeDriver):
+ def __init__(self):
+ super(HyperVDriver, self).__init__()
+
+ self._volumeops = volumeops.VolumeOps()
+ self._vmops = vmops.VMOps(self._volumeops)
+ self._snapshotops = snapshotops.SnapshotOps()
+ self._livemigrationops = livemigrationops.LiveMigrationOps(
+ self._volumeops)
+
+ def init_host(self, host):
+ self._host = host
+
+ def list_instances(self):
+ return self._vmops.list_instances()
+
+ def list_instances_detail(self):
+ return self._vmops.list_instances_detail()
+
+ def spawn(self, context, instance, image_meta, network_info,
+ block_device_info=None):
+ self._vmops.spawn(context, instance, image_meta, network_info,
+ block_device_info)
+
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
+ self._vmops.reboot(instance, network_info, reboot_type)
+
+ def destroy(self, instance, network_info=None, cleanup=True):
+ self._vmops.destroy(instance, network_info, cleanup)
+
+ def get_info(self, instance):
+ return self._vmops.get_info(instance)
+
+ def attach_volume(self, connection_info, instance_name, mountpoint):
+ """Attach volume storage to VM instance"""
+ return self._volumeops.attach_volume(connection_info,
+ instance_name,
+ mountpoint)
+
+ def detach_volume(self, connection_info, instance_name, mountpoint):
+ """Detach volume storage to VM instance"""
+ return self._volumeops.detach_volume(connection_info,
+ instance_name,
+ mountpoint)
+
+ def get_volume_connector(self, instance):
+ return self._volumeops.get_volume_connector(instance)
+
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ def update_available_resource(self, context, host):
+ self._vmops.update_available_resource(context, host)
+
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ return {}
+
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
+ def snapshot(self, context, instance, name):
+ self._snapshotops.snapshot(context, instance, name)
+
+ def pause(self, instance):
+ self._vmops.pause(instance)
+
+ def unpause(self, instance):
+ self._vmops.unpause(instance)
+
+ def suspend(self, instance):
+ self._vmops.suspend(instance)
+
+ def resume(self, instance):
+ self._vmops.resume(instance)
+
+ def power_off(self, instance):
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ self._vmops.power_on(instance)
+
+ def live_migration(self, context, instance_ref, dest, post_method,
+ recover_method, block_migration=False):
+ self._livemigrationops.live_migration(context, instance_ref, dest,
+ post_method, recover_method, block_migration)
+
+ def compare_cpu(self, cpu_info):
+ return self._livemigrationops.compare_cpu(cpu_info)
+
+ def pre_live_migration(self, context, instance, block_device_info,
+ network_info):
+ self._livemigrationops.pre_live_migration(context, instance,
+ block_device_info, network_info)
+
+ def post_live_migration_at_destination(self, ctxt, instance_ref,
+ network_info, block_migration):
+ self._livemigrationops.post_live_migration_at_destination(ctxt,
+ instance_ref, network_info, block_migration)
+
+ def check_can_live_migrate_destination(self, ctxt, instance,
+ block_migration, disk_over_commit):
+ pass
+
+ def check_can_live_migrate_destination_cleanup(self, ctxt,
+ dest_check_data):
+ pass
+
+ def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
+ pass
+
+ def plug_vifs(self, instance, network_info):
+ LOG.debug(_("plug_vifs called"), instance=instance)
+
+ def unplug_vifs(self, instance, network_info):
+ LOG.debug(_("plug_vifs called"), instance=instance)
+
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
+ LOG.debug(_("ensure_filtering_rules_for_instance called"),
+ instance=instance_ref)
+
+ def unfilter_instance(self, instance, network_info):
+ """Stop filtering instance"""
+ LOG.debug(_("unfilter_instance called"), instance=instance)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM"""
+ LOG.debug(_("confirm_migration called"), instance=instance)
+
+ def finish_revert_migration(self, instance, network_info):
+ """Finish reverting a resize, powering back on the instance"""
+ LOG.debug(_("finish_revert_migration called"), instance=instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False):
+ """Completes a resize, turning on the migrated instance"""
+ LOG.debug(_("finish_migration called"), instance=instance)
+
+ def get_console_output(self, instance):
+ LOG.debug(_("get_console_output called"), instance=instance)
+ return ''
+
+ def legacy_nwinfo(self):
+ return False
diff --git a/nova/virt/hyperv/ioutils.py b/nova/virt/hyperv/ioutils.py
new file mode 100644
index 000000000..d927e317f
--- /dev/null
+++ b/nova/virt/hyperv/ioutils.py
@@ -0,0 +1,26 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility class to ease the task of creating stubs of built in IO functions.
+"""
+
+import __builtin__
+
+
def open(name, mode):
    """Open a file via the builtin ``open``.

    Deliberately shadows the builtin at module level so unit tests can stub
    file access by replacing this function instead of patching __builtin__.
    """
    return __builtin__.open(name, mode)
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
new file mode 100644
index 000000000..1f97adf24
--- /dev/null
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -0,0 +1,162 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for live migration VM operations.
+"""
+import os
+import sys
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
class LiveMigrationOps(baseops.BaseOps):
    """Management class for live migration VM operations.

    Uses the Hyper-V WMI virtualization "v2" namespace, which is only
    available on Hyper-V Server 2012 and later.
    """

    def __init__(self, volumeops):
        super(LiveMigrationOps, self).__init__()

        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def _check_live_migration_config(self):
        """Validate that this host can perform live migrations.

        :raises: vmutils.HyperVException if the v2 WMI namespace is missing
                 (pre-2012 Hyper-V), if VM migration is disabled, or if no
                 migration networks are configured.
        """
        try:
            # Accessing the v2 connection raises on Hyper-V versions that
            # do not expose the root/virtualization/v2 namespace.
            self._conn_v2
        except Exception:
            # Bug fix: the original message literal was corrupted with stray
            # quote/backslash characters from a broken line continuation.
            raise vmutils.HyperVException(
                _('Live migration is not supported '
                  'by this version of Hyper-V'))

        migration_svc = self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
        vsmssd = migration_svc.associators(
            wmi_association_class='Msvm_ElementSettingData',
            wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0]
        if not vsmssd.EnableVirtualSystemMigration:
            raise vmutils.HyperVException(
                _('Live migration is not enabled on this host'))
        if not migration_svc.MigrationServiceListenerIPAddressList:
            raise vmutils.HyperVException(
                _('Live migration networks are not configured on this host'))

    def live_migration(self, context, instance_ref, dest, post_method,
                       recover_method, block_migration=False):
        """Live migrate a VM to another Hyper-V host.

        :param context: security context
        :param instance_ref: instance dict; its "name" identifies the VM
        :param dest: destination host name
        :param post_method: callback invoked after a successful migration
        :param recover_method: callback invoked if the migration fails
        :param block_migration: unused here; forwarded to the callbacks
        """
        LOG.debug(_("live_migration called"), instance=instance_ref)
        instance_name = instance_ref["name"]

        try:
            self._check_live_migration_config()

            vm_name = self._vmutils.lookup(self._conn, instance_name)
            if vm_name is None:
                raise exception.InstanceNotFound(instance=instance_name)
            vm = self._conn_v2.Msvm_ComputerSystem(
                ElementName=instance_name)[0]
            vm_settings = vm.associators(
                wmi_association_class='Msvm_SettingsDefineState',
                wmi_result_class='Msvm_VirtualSystemSettingData')[0]

            # Collect the VHD storage allocation settings so the migration
            # can move the disks along with the VM.
            new_resource_setting_data = []
            sasds = vm_settings.associators(
                wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
                wmi_result_class='Msvm_StorageAllocationSettingData')
            for sasd in sasds:
                if sasd.ResourceType == 31 and \
                        sasd.ResourceSubType == \
                        "Microsoft:Hyper-V:Virtual Hard Disk":
                    #sasd.PoolId = ""
                    new_resource_setting_data.append(sasd.GetText_(1))

            LOG.debug(_("Getting live migration networks for remote "
                        "host: %s"), dest)
            _conn_v2_remote = wmi.WMI(
                moniker='//' + dest + '/root/virtualization/v2')
            migration_svc_remote = \
                _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
            remote_ip_address_list = \
                migration_svc_remote.MigrationServiceListenerIPAddressList

            # MigrationType 32771 == VirtualSystemAndStorage
            vsmsd = self._conn_v2.query("select * from "
                                        "Msvm_VirtualSystemMigrationSettingData "
                                        "where MigrationType = 32771")[0]
            vsmsd.DestinationIPAddressList = remote_ip_address_list
            migration_setting_data = vsmsd.GetText_(1)

            migration_svc = \
                self._conn_v2.Msvm_VirtualSystemMigrationService()[0]

            LOG.debug(_("Starting live migration for instance: %s"),
                      instance_name)
            (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost(
                ComputerSystem=vm.path_(),
                DestinationHost=dest,
                MigrationSettingData=migration_setting_data,
                NewResourceSettingData=new_resource_setting_data)
            if ret_val == constants.WMI_JOB_STATUS_STARTED:
                success = self._vmutils.check_job_status(job_path)
            else:
                success = (ret_val == 0)
            if not success:
                raise vmutils.HyperVException(
                    _('Failed to live migrate VM %s') % instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Calling live migration recover_method "
                            "for instance: %s"), instance_name)
                recover_method(context, instance_ref, dest, block_migration)

        LOG.debug(_("Calling live migration post_method for instance: %s"),
                  instance_name)
        post_method(context, instance_ref, dest, block_migration)

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Prepare this host to receive a live-migrated instance.

        When CoW images are used, pre-fetches the base VHD so the incoming
        differencing disk has a parent to attach to (not needed for
        boot-from-volume instances).
        """
        LOG.debug(_("pre_live_migration called"), instance=instance)
        self._check_live_migration_config()

        if FLAGS.use_cow_images:
            ebs_root = self._volumeops.volume_in_mapping(
                self._volumeops.get_default_root_device(),
                block_device_info)
            if not ebs_root:
                base_vhd_path = self._vmutils.get_base_vhd_path(
                    instance["image_ref"])
                if not os.path.exists(base_vhd_path):
                    self._vmutils.fetch_image(base_vhd_path, context,
                                              instance["image_ref"],
                                              instance["user_id"],
                                              instance["project_id"])

    def post_live_migration_at_destination(self, ctxt, instance_ref,
                                           network_info, block_migration):
        """No post-migration work is needed on the destination; only log."""
        LOG.debug(_("post_live_migration_at_destination called"),
                  instance=instance_ref)

    def compare_cpu(self, cpu_info):
        """CPU compatibility checking is not implemented; always compatible."""
        LOG.debug(_("compare_cpu called %s"), cpu_info)
        return True
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
new file mode 100644
index 000000000..5e4676a4a
--- /dev/null
+++ b/nova/virt/hyperv/snapshotops.py
@@ -0,0 +1,187 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for VM snapshot operations.
+"""
+import os
+import shutil
+import sys
+
+from nova import exception
+from nova import flags
+from nova.image import glance
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import vmutils
+from xml.etree import ElementTree
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
class SnapshotOps(baseops.BaseOps):
    """Management class for VM snapshot operations.

    Implements instance snapshotting by creating a Hyper-V VM snapshot,
    exporting and (when needed) merging the VHD chain, and uploading the
    resulting disk image to Glance.
    """

    def __init__(self):
        super(SnapshotOps, self).__init__()
        self._vmutils = vmutils.VMUtils()

    def snapshot(self, context, instance, name):
        """Create snapshot from a running VM instance.

        :param context: security context
        :param instance: instance dict; its "name" identifies the VM
        :param name: snapshot image reference passed to the Glance service
        :raises: exception.InstanceNotFound if the VM does not exist
        :raises: vmutils.HyperVException on any failed WMI job
        """
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance=instance_name)
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]

        LOG.debug(_("Creating snapshot for instance %s"), instance_name)
        (job_path, ret_val, snap_setting_data) = \
            vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job_path)
            if success:
                # For async jobs the setting data must be retrieved from the
                # completed job object rather than the initial return value.
                job_wmi_path = job_path.replace('\\', '/')
                job = wmi.WMI(moniker=job_wmi_path)
                snap_setting_data = job.associators(
                    wmi_result_class='Msvm_VirtualSystemSettingData')[0]
        else:
            success = (ret_val == 0)
        if not success:
            raise vmutils.HyperVException(
                _('Failed to create snapshot for VM %s') %
                instance_name)

        export_folder = None
        f = None

        try:
            src_vhd_path = os.path.join(FLAGS.instances_path, instance_name,
                                        instance_name + ".vhd")

            image_man_svc = self._conn.Msvm_ImageManagementService()[0]

            LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
            (src_vhd_info, job_path, ret_val) = \
                image_man_svc.GetVirtualHardDiskInfo(src_vhd_path)
            if ret_val == constants.WMI_JOB_STATUS_STARTED:
                success = self._vmutils.check_job_status(job_path)
            else:
                success = (ret_val == 0)
            if not success:
                raise vmutils.HyperVException(
                    _("Failed to get info for disk %s") %
                    (src_vhd_path))

            # The VHD info is returned as an XML document; a non-empty
            # ParentPath property means the disk is a differencing (CoW)
            # disk whose base must be exported and merged as well.
            src_base_disk_path = None
            et = ElementTree.fromstring(src_vhd_info)
            for item in et.findall("PROPERTY"):
                if item.attrib["NAME"] == "ParentPath":
                    src_base_disk_path = item.find("VALUE").text
                    break

            export_folder = self._vmutils.make_export_path(instance_name)

            dest_vhd_path = os.path.join(export_folder, os.path.basename(
                src_vhd_path))
            LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
                      locals())
            shutil.copyfile(src_vhd_path, dest_vhd_path)

            image_vhd_path = None
            if not src_base_disk_path:
                # Flat disk: the copied VHD can be uploaded directly.
                image_vhd_path = dest_vhd_path
            else:
                # Differencing disk: copy the base too, re-link the copies
                # (the copied child still points at the original parent
                # path) and merge them into a single uploadable VHD.
                dest_base_disk_path = os.path.join(export_folder,
                    os.path.basename(src_base_disk_path))
                LOG.debug(_('Copying base disk %(src_vhd_path)s to '
                    '%(dest_base_disk_path)s'), locals())
                shutil.copyfile(src_base_disk_path, dest_base_disk_path)

                LOG.debug(_("Reconnecting copied base VHD "
                    "%(dest_base_disk_path)s and diff VHD %(dest_vhd_path)s"),
                    locals())
                (job_path, ret_val) = \
                    image_man_svc.ReconnectParentVirtualHardDisk(
                        ChildPath=dest_vhd_path,
                        ParentPath=dest_base_disk_path,
                        Force=True)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job_path)
                else:
                    success = (ret_val == 0)
                if not success:
                    raise vmutils.HyperVException(
                        _("Failed to reconnect base disk "
                          "%(dest_base_disk_path)s and diff disk "
                          "%(dest_vhd_path)s") %
                        locals())

                LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
                            "diff disk %(dest_vhd_path)s"),
                          locals())
                (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
                    SourcePath=dest_vhd_path,
                    DestinationPath=dest_base_disk_path)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job_path)
                else:
                    success = (ret_val == 0)
                if not success:
                    raise vmutils.HyperVException(
                        _("Failed to merge base disk %(dest_base_disk_path)s "
                          "and diff disk %(dest_vhd_path)s") %
                        locals())
                image_vhd_path = dest_base_disk_path

            (glance_image_service, image_id) = \
                glance.get_remote_image_service(context, name)
            image_metadata = {"is_public": False,
                              "disk_format": "vhd",
                              "container_format": "bare",
                              "properties": {}}
            # ioutils.open is used (instead of the builtin) so unit tests
            # can stub file access.
            f = ioutils.open(image_vhd_path, 'rb')
            LOG.debug(
                _("Updating Glance image %(image_id)s with content from "
                  "merged disk %(image_vhd_path)s"),
                locals())
            glance_image_service.update(context, image_id, image_metadata, f)

            LOG.debug(_("Snapshot image %(image_id)s updated for VM "
                        "%(instance_name)s"), locals())
        finally:
            # Always remove the Hyper-V snapshot and the exported files.
            # NOTE(review): if removing the snapshot fails, the raise below
            # skips f.close() and the export folder cleanup, and may mask an
            # exception from the try block — consider logging instead.
            LOG.debug(_("Removing snapshot %s"), name)
            (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
                snap_setting_data.path_())
            if ret_val == constants.WMI_JOB_STATUS_STARTED:
                success = self._vmutils.check_job_status(job_path)
            else:
                success = (ret_val == 0)
            if not success:
                raise vmutils.HyperVException(
                    _('Failed to remove snapshot for VM %s') %
                    instance_name)
            if f:
                f.close()
            if export_folder:
                LOG.debug(_('Removing folder %s '), export_folder)
                shutil.rmtree(export_folder)
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
new file mode 100644
index 000000000..94cb7477e
--- /dev/null
+++ b/nova/virt/hyperv/vmops.py
@@ -0,0 +1,650 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for basic VM operations.
+"""
+import multiprocessing
+import os
+import uuid
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt import driver
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmutils
+
+LOG = logging.getLogger(__name__)
+
+hyperv_opts = [
+ cfg.StrOpt('vswitch_name',
+ default=None,
+ help='Default vSwitch Name, '
+ 'if none provided first external is used'),
+ cfg.BoolOpt('limit_cpu_features',
+ default=False,
+ help='required for live migration among '
+ 'hosts with different CPU features')
+ ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(hyperv_opts)
+
+
class VMOps(baseops.BaseOps):
    """Management class for basic VM operations.

    Covers instance lifecycle (spawn, reboot, destroy), power state
    changes and host resource reporting for the Hyper-V driver.
    """

    def __init__(self, volumeops):
        super(VMOps, self).__init__()

        self._vmutils = vmutils.VMUtils()
        self._volumeops = volumeops

    def list_instances(self):
        """Return the names of all the instances known to Hyper-V."""
        vms = [v.ElementName
               for v in self._conn.Msvm_ComputerSystem(['ElementName'],
                                                       Caption="Virtual Machine")]
        return vms

    def list_instances_detail(self):
        """Return a list of InstanceInfo (name, power state) for all VMs."""
        instance_infos = []
        for instance_name in self.list_instances():
            info = self._get_info(instance_name)
            instance_info = driver.InstanceInfo(
                instance_name, int(info['state']))
            instance_infos.append(instance_info)
        return instance_infos

    def get_info(self, instance):
        """Get information about the VM."""
        LOG.debug(_("get_info called for instance"), instance=instance)
        instance_name = instance["name"]
        return self._get_info(instance_name)

    def _get_info(self, instance_name):
        """Return state/memory/cpu info for the named VM.

        :raises: exception.InstanceNotFound if the VM does not exist
        """
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance=instance_name)
        vm = self._conn.Msvm_ComputerSystem(
            ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
        summary_info = vs_man_svc.GetSummaryInformation(
            [constants.VM_SUMMARY_NUM_PROCS,
             constants.VM_SUMMARY_ENABLED_STATE,
             constants.VM_SUMMARY_MEMORY_USAGE,
             constants.VM_SUMMARY_UPTIME],
            settings_paths)[1]
        info = summary_info[0]

        LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
        state = str(constants.HYPERV_POWER_STATE[info.EnabledState])
        memusage = str(info.MemoryUsage)
        numprocs = str(info.NumberOfProcessors)
        uptime = str(info.UpTime)

        LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)s,"
                    " mem=%(memusage)s, num_cpu=%(numprocs)s,"
                    " uptime=%(uptime)s"), locals())

        return {'state': state,
                'max_mem': info.MemoryUsage,
                'mem': info.MemoryUsage,
                'num_cpu': info.NumberOfProcessors,
                'cpu_time': info.UpTime}

    def spawn(self, context, instance, image_meta, network_info,
              block_device_info=None):
        """Create a new VM and start it.

        :raises: exception.InstanceExists if a VM with the same name exists
        """
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is not None:
            raise exception.InstanceExists(name=instance_name)

        ebs_root = self._volumeops.volume_in_mapping(
            self._volumeops.get_default_root_device(),
            block_device_info)

        #If is not a boot from volume spawn
        if not ebs_root:
            #Fetch the file, assume it is a VHD file.
            vhdfile = self._vmutils.get_vhd_path(instance_name)
            try:
                self._cache_image(fn=self._vmutils.fetch_image,
                                  context=context,
                                  target=vhdfile,
                                  fname=instance['image_ref'],
                                  image_id=instance['image_ref'],
                                  user=instance['user_id'],
                                  project=instance['project_id'],
                                  cow=FLAGS.use_cow_images)
            except Exception as exn:
                LOG.exception(_('cache image failed: %s'), exn)
                self.destroy(instance)
                # Bug fix: previously the exception was swallowed and spawn
                # went on to create a VM for an instance that had just been
                # destroyed and has no disk.
                raise

        try:
            self._create_vm(instance)

            if not ebs_root:
                self._create_disk(instance['name'], vhdfile)
            else:
                self._volumeops.attach_boot_volume(block_device_info,
                                                   instance_name)

            #A SCSI controller for volumes connection is created
            self._create_scsi_controller(instance['name'])

            for vif in network_info:
                mac_address = vif['address'].replace(':', '')
                self._create_nic(instance['name'], mac_address)

            LOG.debug(_('Starting VM %s '), instance_name)
            self._set_vm_state(instance['name'], 'Enabled')
            LOG.info(_('Started VM %s '), instance_name)
        except Exception as exn:
            LOG.exception(_('spawn vm failed: %s'), exn)
            self.destroy(instance)
            raise exn

    def _create_vm(self, instance):
        """Create a VM but don't start it."""
        instance_name = instance["name"]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]

        vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
        vs_gs_data.ElementName = instance_name
        (job, ret_val) = vs_man_svc.DefineVirtualSystem(
            [], None, vs_gs_data.GetText_(1))[1:]
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            success = (ret_val == 0)

        if not success:
            raise vmutils.HyperVException(_('Failed to create VM %s') %
                                          instance_name)

        LOG.debug(_('Created VM %s...'), instance_name)
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]

        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        vmsetting = [s for s in vmsettings
                     if s.SettingType == 3][0]  # avoid snapshots
        memsetting = vmsetting.associators(
            wmi_result_class='Msvm_MemorySettingData')[0]
        #No Dynamic Memory, so reservation, limit and quantity are identical.
        mem = long(str(instance['memory_mb']))
        memsetting.VirtualQuantity = mem
        memsetting.Reservation = mem
        memsetting.Limit = mem

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [memsetting.GetText_(1)])
        LOG.debug(_('Set memory for vm %s...'), instance_name)
        procsetting = vmsetting.associators(
            wmi_result_class='Msvm_ProcessorSettingData')[0]
        vcpus = long(instance['vcpus'])
        procsetting.VirtualQuantity = vcpus
        procsetting.Reservation = vcpus
        procsetting.Limit = 100000  # static assignment to 100%

        if FLAGS.limit_cpu_features:
            # Required for live migration between hosts with different CPUs.
            procsetting.LimitProcessorFeatures = True

        (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
            vm.path_(), [procsetting.GetText_(1)])
        LOG.debug(_('Set vcpus for vm %s...'), instance_name)

    def _create_scsi_controller(self, vm_name):
        """Create an iscsi controller ready to mount volumes."""
        LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
                    'attaching') % locals())
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        controllers = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
            AND InstanceID LIKE '%Default%'")
        if not controllers:
            # Bug fix: the original indexed [0] before checking, so a
            # missing controller raised IndexError instead of this error.
            raise vmutils.HyperVException(_('Controller not found'))
        scsicontrldefault = controllers[0]
        scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', scsicontrldefault)
        scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        scsiresource = self._vmutils.add_virt_resource(self._conn,
            scsicontrl, vm)
        if scsiresource is None:
            raise vmutils.HyperVException(
                _('Failed to add scsi controller to VM %s') %
                vm_name)

    def _create_disk(self, vm_name, vhdfile):
        """Create a disk and attach it to the vm."""
        LOG.debug(_('Creating disk for %(vm_name)s by attaching'
                    ' disk file %(vhdfile)s') % locals())
        #Find the IDE controller for the vm.
        vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
        vm = vms[0]
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        ctrller = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
                   and r.Address == "0"]
        #Find the default disk drive object for the vm and clone it.
        diskdflt = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
            AND InstanceID LIKE '%Default%'")[0]
        diskdrive = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', diskdflt)
        #Set the IDE ctrller as parent.
        diskdrive.Parent = ctrller[0].path_()
        diskdrive.Address = 0
        #Add the cloned disk drive object to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            diskdrive, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add diskdrive to VM %s') %
                vm_name)
        diskdrive_path = new_resources[0]
        LOG.debug(_('New disk drive path is %s'), diskdrive_path)
        #Find the default VHD disk object.
        vhddefault = self._conn.query(
            "SELECT * FROM Msvm_ResourceAllocationSettingData \
            WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
            InstanceID LIKE '%Default%' ")[0]

        #Clone the default and point it to the image file.
        vhddisk = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_ResourceAllocationSettingData', vhddefault)
        #Set the new drive as the parent.
        vhddisk.Parent = diskdrive_path
        vhddisk.Connection = [vhdfile]

        #Add the new vhd object as a virtual hard disk to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            vhddisk, vm)
        if new_resources is None:
            raise vmutils.HyperVException(
                _('Failed to add vhd file to VM %s') %
                vm_name)
        LOG.info(_('Created disk for %s'), vm_name)

    def _create_nic(self, vm_name, mac):
        """Create a (synthetic) nic and attach it to the vm."""
        LOG.debug(_('Creating nic for %s '), vm_name)
        #Find the vswitch that is connected to the physical nic.
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        extswitch = self._find_external_network()
        if extswitch is None:
            raise vmutils.HyperVException(_('Cannot find vSwitch'))

        vm = vms[0]
        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
        #Find the default nic and clone it to create a new nic for the vm.
        #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
        #Linux Integration Components installed.
        syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
        default_nic_data = [n for n in syntheticnics_data
                            if n.InstanceID.rfind('Default') > 0]
        new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
            'Msvm_SyntheticEthernetPortSettingData',
            default_nic_data[0])
        #Create a port on the vswitch.
        (new_port, ret_val) = switch_svc.CreateSwitchPort(
            Name=str(uuid.uuid4()),
            FriendlyName=vm_name,
            ScopeOfResidence="",
            VirtualSwitch=extswitch.path_())
        if ret_val != 0:
            LOG.error(_('Failed creating a port on the external vswitch'))
            raise vmutils.HyperVException(_('Failed creating port for %s') %
                                          vm_name)
        ext_path = extswitch.path_()
        LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
                  % locals())
        #Connect the new nic to the new port.
        new_nic_data.Connection = [new_port]
        new_nic_data.ElementName = vm_name + ' nic'
        new_nic_data.Address = mac
        new_nic_data.StaticMacAddress = 'True'
        new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
        #Add the new nic to the vm.
        new_resources = self._vmutils.add_virt_resource(self._conn,
            new_nic_data, vm)
        if new_resources is None:
            raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
                                          vm_name)
        LOG.info(_("Created nic for %s "), vm_name)

    def _find_external_network(self):
        """Find the vswitch that is connected to the physical nic.
        Assumes only one physical nic on the host
        """
        #If there are no physical nics connected to networks, return.
        if FLAGS.vswitch_name:
            LOG.debug(_("Attempting to bind NIC to %s ")
                      % FLAGS.vswitch_name)
            bound = self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)
        else:
            LOG.debug(_("No vSwitch specified, attaching to default"))
            # Bug fix: the query result was previously discarded, making
            # the len(bound) check below raise NameError in this branch.
            bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
        if len(bound) == 0:
            return None
        if FLAGS.vswitch_name:
            return self._conn.Msvm_VirtualSwitch(
                ElementName=FLAGS.vswitch_name)[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
        else:
            # Bug fix: the WMI query returns a list; the first port must be
            # indexed before walking its associations.
            return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\
                .associators(wmi_result_class='Msvm_SwitchPort')[0]\
                .associators(wmi_result_class='Msvm_VirtualSwitch')[0]

    def reboot(self, instance, network_info, reboot_type):
        """Reboot the specified instance.

        :raises: exception.InstanceNotFound if the VM does not exist
        """
        instance_name = instance["name"]
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            raise exception.InstanceNotFound(instance_id=instance["id"])
        self._set_vm_state(instance_name, 'Reboot')

    def destroy(self, instance, network_info=None, cleanup=True):
        """Destroy the VM. Also destroy the associated VHD disk files."""
        instance_name = instance["name"]
        LOG.debug(_("Got request to destroy vm %s"), instance_name)
        vm = self._vmutils.lookup(self._conn, instance_name)
        if vm is None:
            # Already gone: destroy is idempotent.
            return
        vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        #Stop the VM first.
        self._set_vm_state(instance_name, 'Disabled')
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volumes_drives_list = []
        #collect the volumes information before destroying the VM.
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            #Appending the Msvm_Disk path
            volumes_drives_list.append(drive_path)
        #Collect disk file information before destroying the VM.
        for disk in disks:
            disk_files.extend([c for c in disk.Connection])
        #Nuke the VM. Does not destroy disks.
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        else:
            # Bug fix: 'success' was previously left unassigned for
            # unexpected return codes, raising NameError below instead of
            # the intended HyperVException.
            success = (ret_val == 0)
        if not success:
            raise vmutils.HyperVException(_('Failed to destroy vm %s') %
                                          instance_name)
        #Disconnect volumes
        for volume_drive in volumes_drives_list:
            self._volumeops.disconnect_volume(volume_drive)
        #Delete associated vhd disk files.
        for disk in disk_files:
            vhdfile = self._conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
                      % locals())
            vhdfile.Delete()

    def pause(self, instance):
        """Pause VM instance."""
        LOG.debug(_("Pause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Paused')

    def unpause(self, instance):
        """Unpause paused VM instance."""
        LOG.debug(_("Unpause instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def suspend(self, instance):
        """Suspend the specified instance."""
        # Bug fix: removed a leftover "print instance" debug statement that
        # wrote to stdout on every suspend.
        LOG.debug(_("Suspend instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Suspended')

    def resume(self, instance):
        """Resume the suspended VM instance."""
        LOG.debug(_("Resume instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def power_off(self, instance):
        """Power off the specified instance."""
        LOG.debug(_("Power off instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Disabled')

    def power_on(self, instance):
        """Power on the specified instance."""
        LOG.debug(_("Power on instance"), instance=instance)
        self._set_vm_state(instance["name"], 'Enabled')

    def _set_vm_state(self, vm_name, req_state):
        """Set the desired state of the VM.

        :raises: vmutils.HyperVException if the state change fails
        """
        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
        if len(vms) == 0:
            return False
        (job, ret_val) = vms[0].RequestStateChange(
            constants.REQ_POWER_STATE[req_state])
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._vmutils.check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            #Invalid state for current operation. Typically means it is
            #already in the state requested
            success = True
        if success:
            LOG.info(_("Successfully changed vm state of %(vm_name)s"
                       " to %(req_state)s") % locals())
        else:
            msg = _("Failed to change vm state of %(vm_name)s"
                    " to %(req_state)s") % locals()
            LOG.error(msg)
            raise vmutils.HyperVException(msg)

    def _get_vcpu_total(self):
        """Get vcpu number of physical computer.
        :returns: the number of cpu core.
        """
        # On certain platforms, this will raise a NotImplementedError.
        try:
            return multiprocessing.cpu_count()
        except NotImplementedError:
            LOG.warn(_("Cannot get the number of cpu, because this "
                       "function is not implemented for this platform. "
                       "This error can be safely ignored for now."))
            return 0

    def _get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.
        :returns: the total amount of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT TotalVisibleMemorySize FROM win32_operatingsystem")[0]\
            .TotalVisibleMemorySize
        total_mb = long(total_kb) / 1024
        return total_mb

    def _get_local_gb_total(self):
        """Get the total hdd size(GB) of physical computer.
        :returns:
            The total amount of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        #TODO(jordanrinke): This binds to C only right now,
        #need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT Size FROM win32_logicaldisk WHERE DriveType=3")[0].Size
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_vcpu_used(self):
        """Get vcpu usage number of physical computer.
        :returns: The total number of vcpu that currently used.
        """
        #TODO(jordanrinke) figure out a way to count assigned VCPUs
        total_vcpu = 0
        return total_vcpu

    def _get_memory_mb_used(self):
        """Get the free memory size(MB) of physical computer.
        :returns: the total usage of memory(MB).
        """
        total_kb = self._conn_cimv2.query(
            "SELECT FreePhysicalMemory FROM win32_operatingsystem")[0]\
            .FreePhysicalMemory
        total_mb = long(total_kb) / 1024

        return total_mb

    def _get_local_gb_used(self):
        """Get the free hdd size(GB) of physical computer.
        :returns:
            The total usage of HDD(GB).
            Note that this value shows a partition where
            NOVA-INST-DIR/instances mounts.
        """
        #TODO(jordanrinke): This binds to C only right now,
        #need to bind to instance dir
        total_kb = self._conn_cimv2.query(
            "SELECT FreeSpace FROM win32_logicaldisk WHERE DriveType=3")[0]\
            .FreeSpace
        total_gb = long(total_kb) / (1024 ** 3)
        return total_gb

    def _get_hypervisor_version(self):
        """Get hypervisor version.
        :returns: hypervisor version (ex. 12003)
        """
        version = self._conn_cimv2.Win32_OperatingSystem()[0]\
            .Version.replace('.', '')
        LOG.info(_('Windows version: %s ') % version)
        return version

    def update_available_resource(self, context, host):
        """Updates compute manager resource info on ComputeNode table.

        This method is called as an periodic tasks and is used only
        in live migration currently.

        :param context: security context
        :param host: hostname that compute manager is currently running

        :raises: exception.ComputeServiceUnavailable if no compute service
                 record exists for the host
        """

        try:
            service_ref = db.service_get_all_compute_by_host(context, host)[0]
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=host)

        # Updating host information
        # TODO(alexpilotti) implemented cpu_info
        dic = {'vcpus': self._get_vcpu_total(),
               'memory_mb': self._get_memory_mb_total(),
               'local_gb': self._get_local_gb_total(),
               'vcpus_used': self._get_vcpu_used(),
               'memory_mb_used': self._get_memory_mb_used(),
               'local_gb_used': self._get_local_gb_used(),
               'hypervisor_type': "hyperv",
               'hypervisor_version': self._get_hypervisor_version(),
               'cpu_info': "unknown",
               'service_id': service_ref['id'],
               'disk_available_least': 1}

        compute_node_ref = service_ref['compute_node']
        if not compute_node_ref:
            LOG.info(_('Compute_service record created for %s ') % host)
            db.compute_node_create(context, dic)
        else:
            LOG.info(_('Compute_service record updated for %s ') % host)
            db.compute_node_update(context, compute_node_ref[0]['id'], dic)

    def _cache_image(self, fn, target, fname, cow=False, Size=None,
                     *args, **kwargs):
        """Wrapper for a method that creates an image that caches the image.

        This wrapper will save the image into a common store and create a
        copy for use by the hypervisor.

        The underlying method should specify a kwarg of target representing
        where the image will be saved.

        fname is used as the filename of the base image. The filename needs
        to be unique to a given image.

        If cow is True, it will make a CoW image instead of a copy.
        """
        @utils.synchronized(fname)
        def call_if_not_exists(path, fn, *args, **kwargs):
            # Serialized per image id so two concurrent spawns do not fetch
            # the same base image twice.
            if not os.path.exists(path):
                fn(target=path, *args, **kwargs)

        if not os.path.exists(target):
            LOG.debug(_("use_cow_image:%s"), cow)
            if cow:
                # Fetch (at most once) the shared base VHD, then create a
                # differencing disk on top of it for this instance.
                base = self._vmutils.get_base_vhd_path(fname)
                call_if_not_exists(base, fn, *args, **kwargs)

                image_service = self._conn.query(
                    "Select * from Msvm_ImageManagementService")[0]
                (job, ret_val) = \
                    image_service.CreateDifferencingVirtualHardDisk(
                        Path=target, ParentPath=base)
                LOG.debug(
                    "Creating difference disk: JobID=%s, Source=%s, Target=%s",
                    job, base, target)
                if ret_val == constants.WMI_JOB_STATUS_STARTED:
                    success = self._vmutils.check_job_status(job)
                else:
                    success = (ret_val == 0)

                if not success:
                    raise vmutils.HyperVException(
                        _('Failed to create Difference Disk from '
                          '%(base)s to %(target)s') % locals())

            else:
                call_if_not_exists(target, fn, *args, **kwargs)
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
new file mode 100644
index 000000000..2e54e6d47
--- /dev/null
+++ b/nova/virt/hyperv/vmutils.py
@@ -0,0 +1,146 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility class for VM related operations.
+"""
+
+import os
+import shutil
+import sys
+import time
+import uuid
+
+from nova import exception
+from nova import flags
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import constants
+from nova.virt import images
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import wmi
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger(__name__)
+
+
+class HyperVException(exception.NovaException):
+ def __init__(self, message=None):
+ super(HyperVException, self).__init__(message)
+
+
+class VMUtils(object):
+ def lookup(self, conn, i):
+ vms = conn.Msvm_ComputerSystem(ElementName=i)
+ n = len(vms)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise HyperVException(_('duplicate name found: %s') % i)
+ else:
+ return vms[0].ElementName
+
+ #TODO(alexpilotti): use the reactor to poll instead of sleep
+ def check_job_status(self, jobpath):
+ """Poll WMI job state for completion"""
+ job_wmi_path = jobpath.replace('\\', '/')
+ job = wmi.WMI(moniker=job_wmi_path)
+
+ while job.JobState == constants.WMI_JOB_STATE_RUNNING:
+ time.sleep(0.1)
+ job = wmi.WMI(moniker=job_wmi_path)
+ if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
+ LOG.debug(_("WMI job failed: %(ErrorSummaryDescription)s - "
+ "%(ErrorDescription)s - %(ErrorCode)s") % job)
+ return False
+ desc = job.Description
+ elap = job.ElapsedTime
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ % locals())
+ return True
+
+ def get_vhd_path(self, instance_name):
+ base_vhd_folder = os.path.join(FLAGS.instances_path, instance_name)
+ if not os.path.exists(base_vhd_folder):
+ LOG.debug(_('Creating folder %s '), base_vhd_folder)
+ os.makedirs(base_vhd_folder)
+ return os.path.join(base_vhd_folder, instance_name + ".vhd")
+
+ def get_base_vhd_path(self, image_name):
+ base_dir = os.path.join(FLAGS.instances_path, '_base')
+ if not os.path.exists(base_dir):
+ os.makedirs(base_dir)
+ return os.path.join(base_dir, image_name + ".vhd")
+
+ def make_export_path(self, instance_name):
+ export_folder = os.path.join(FLAGS.instances_path, "export",
+ instance_name)
+ if os.path.isdir(export_folder):
+ LOG.debug(_('Removing existing folder %s '), export_folder)
+ shutil.rmtree(export_folder)
+ LOG.debug(_('Creating folder %s '), export_folder)
+ os.makedirs(export_folder)
+ return export_folder
+
+ def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
+ """Clone a WMI object"""
+ cl = conn.__getattr__(wmi_class) # get the class
+ newinst = cl.new()
+ #Copy the properties from the original.
+ for prop in wmi_obj._properties:
+ if prop == "VirtualSystemIdentifiers":
+ strguid = []
+ strguid.append(str(uuid.uuid4()))
+ newinst.Properties_.Item(prop).Value = strguid
+ else:
+ newinst.Properties_.Item(prop).Value = \
+ wmi_obj.Properties_.Item(prop).Value
+ return newinst
+
+ def add_virt_resource(self, conn, res_setting_data, target_vm):
+ """Add a new resource (disk/nic) to the VM"""
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ (job, new_resources, ret_val) = vs_man_svc.\
+ AddVirtualSystemResources([res_setting_data.GetText_(1)],
+ target_vm.path_())
+ success = True
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self.check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ if success:
+ return new_resources
+ else:
+ return None
+
+ def remove_virt_resource(self, conn, res_setting_data, target_vm):
+        """Remove a resource (disk/nic) from the VM"""
+ vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
+ (job, ret_val) = vs_man_svc.\
+ RemoveVirtualSystemResources([res_setting_data.path_()],
+ target_vm.path_())
+ success = True
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ success = self.check_job_status(job)
+ else:
+ success = (ret_val == 0)
+ return success
+
+ def fetch_image(self, target, context, image_id, user, project,
+ *args, **kwargs):
+ images.fetch(context, image_id, target, user, project)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
new file mode 100644
index 000000000..a8e5299c0
--- /dev/null
+++ b/nova/virt/hyperv/volumeops.py
@@ -0,0 +1,297 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+import time
+
+from nova import block_device
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import baseops
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+
+LOG = logging.getLogger(__name__)
+
+hyper_volumeops_opts = [
+    cfg.IntOpt('hyperv_attaching_volume_retry_count',
+               default=10,
+               help='The number of times we retry on attaching volume '),
+    cfg.IntOpt('hyperv_wait_between_attach_retry',
+               default=5,
+               help='The seconds to wait between a volume attachment attempt'),
+ ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(hyper_volumeops_opts)
+
+
+class VolumeOps(baseops.BaseOps):
+ """
+ Management class for Volume-related tasks
+ """
+
+ def __init__(self):
+ super(VolumeOps, self).__init__()
+
+ self._vmutils = vmutils.VMUtils()
+ self._driver = driver
+ self._block_device = block_device
+ self._time = time
+ self._initiator = None
+ self._default_root_device = 'vda'
+ self._attaching_volume_retry_count = \
+ FLAGS.hyperv_attaching_volume_retry_count
+ self._wait_between_attach_retry = \
+ FLAGS.hyperv_wait_between_attach_retry
+ self._volutils = volumeutils.VolumeUtils()
+
+ def attach_boot_volume(self, block_device_info, vm_name):
+ """Attach the boot volume to the IDE controller"""
+ LOG.debug(_("block device info: %s"), block_device_info)
+ ebs_root = self._driver.block_device_info_get_mapping(
+ block_device_info)[0]
+ connection_info = ebs_root['connection_info']
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ self._volutils.login_storage_target(target_lun, target_iqn,
+ target_portal)
+ try:
+ #Getting the mounted disk
+ mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+ #Attach to IDE controller
+ #Find the IDE controller for the vm.
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ ctrller = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
+ and r.Address == "0"]
+ #Attaching to the same slot as the VHD disk file
+ self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm)
+ except Exception as exn:
+ LOG.exception(_('Attach boot from volume failed: %s'), exn)
+ self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+ raise vmutils.HyperVException(
+ _('Unable to attach boot volume to instance %s')
+ % vm_name)
+
+ def volume_in_mapping(self, mount_device, block_device_info):
+ return self._volutils.volume_in_mapping(mount_device,
+ block_device_info)
+
+ def attach_volume(self, connection_info, instance_name, mountpoint):
+ """Attach a volume to the SCSI controller"""
+ LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
+ " %(mountpoint)s") % locals())
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ self._volutils.login_storage_target(target_lun, target_iqn,
+ target_portal)
+ try:
+ #Getting the mounted disk
+ mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+ #Find the SCSI controller for the vm
+ vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
+ vm = vms[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ ctrller = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Synthetic SCSI Controller']
+ self._attach_volume_to_controller(
+ ctrller, self._get_free_controller_slot(ctrller[0]),
+ mounted_disk, vm)
+ except Exception as exn:
+ LOG.exception(_('Attach volume failed: %s'), exn)
+ self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+ raise vmutils.HyperVException(
+ _('Unable to attach volume to instance %s')
+ % instance_name)
+
+ def _attach_volume_to_controller(self, controller, address, mounted_disk,
+ instance):
+ """Attach a volume to a controller """
+ #Find the default disk drive object for the vm and clone it.
+ diskdflt = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
+ diskdrive = self._vmutils.clone_wmi_obj(self._conn,
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
+ diskdrive.Address = address
+ diskdrive.Parent = controller[0].path_()
+ diskdrive.HostResource = [mounted_disk[0].path_()]
+ new_resources = self._vmutils.add_virt_resource(self._conn, diskdrive,
+ instance)
+ if new_resources is None:
+ raise vmutils.HyperVException(_('Failed to add volume to VM %s') %
+ instance)
+
+ def _get_free_controller_slot(self, scsi_controller):
+ #Getting volumes mounted in the SCSI controller
+ volumes = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
+ AND Parent = '" + scsi_controller.path_() + "'")
+        #Slots start from 0, so the length of the disks gives us the free slot
+ return len(volumes)
+
+ def detach_volume(self, connection_info, instance_name, mountpoint):
+        """Detach a volume from the SCSI controller"""
+ LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
+ " %(mountpoint)s") % locals())
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ #Getting the mounted disk
+ mounted_disk = self._get_mounted_disk_from_lun(target_iqn, target_lun)
+ physical_list = self._conn.query(
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'")
+ physical_disk = 0
+ for phydisk in physical_list:
+ host_resource_list = phydisk.HostResource
+ if host_resource_list is None:
+ continue
+ host_resource = str(host_resource_list[0].lower())
+ mounted_disk_path = str(mounted_disk[0].path_().lower())
+ LOG.debug(_("Mounted disk to detach is: %s"), mounted_disk_path)
+ LOG.debug(_("host_resource disk detached is: %s"), host_resource)
+ if host_resource == mounted_disk_path:
+ physical_disk = phydisk
+ LOG.debug(_("Physical disk detached is: %s"), physical_disk)
+ vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
+ vm = vms[0]
+ remove_result = self._vmutils.remove_virt_resource(self._conn,
+ physical_disk, vm)
+ if remove_result is False:
+ raise vmutils.HyperVException(
+ _('Failed to remove volume from VM %s') %
+ instance_name)
+ #Sending logout
+ self._volutils.logout_storage_target(self._conn_wmi, target_iqn)
+
+ def get_volume_connector(self, instance):
+ if not self._initiator:
+ self._initiator = self._get_iscsi_initiator()
+ if not self._initiator:
+ LOG.warn(_('Could not determine iscsi initiator name'),
+ instance=instance)
+ return {
+ 'ip': FLAGS.my_ip,
+ 'initiator': self._initiator,
+ }
+
+ def _get_iscsi_initiator(self):
+ return self._volutils.get_iscsi_initiator(self._conn_cimv2)
+
+ def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
+ initiator_session = self._conn_wmi.query(
+ "SELECT * FROM MSiSCSIInitiator_SessionClass \
+ WHERE TargetName='" + target_iqn + "'")[0]
+ devices = initiator_session.Devices
+ device_number = None
+ for device in devices:
+ LOG.debug(_("device.InitiatorName: %s"), device.InitiatorName)
+ LOG.debug(_("device.TargetName: %s"), device.TargetName)
+ LOG.debug(_("device.ScsiPortNumber: %s"), device.ScsiPortNumber)
+ LOG.debug(_("device.ScsiPathId: %s"), device.ScsiPathId)
+            LOG.debug(_("device.ScsiTargetId: %s"), device.ScsiTargetId)
+ LOG.debug(_("device.ScsiLun: %s"), device.ScsiLun)
+ LOG.debug(_("device.DeviceInterfaceGuid :%s"),
+ device.DeviceInterfaceGuid)
+ LOG.debug(_("device.DeviceInterfaceName: %s"),
+ device.DeviceInterfaceName)
+ LOG.debug(_("device.LegacyName: %s"), device.LegacyName)
+ LOG.debug(_("device.DeviceType: %s"), device.DeviceType)
+ LOG.debug(_("device.DeviceNumber %s"), device.DeviceNumber)
+ LOG.debug(_("device.PartitionNumber :%s"), device.PartitionNumber)
+ scsi_lun = device.ScsiLun
+ if scsi_lun == target_lun:
+ device_number = device.DeviceNumber
+ if device_number is None:
+ raise vmutils.HyperVException(
+ _('Unable to find a mounted disk for'
+ ' target_iqn: %s') % target_iqn)
+ LOG.debug(_("Device number : %s"), device_number)
+ LOG.debug(_("Target lun : %s"), target_lun)
+ #Finding Mounted disk drive
+ for i in range(1, self._attaching_volume_retry_count):
+ mounted_disk = self._conn.query(
+ "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
+ str(device_number) + "")
+ LOG.debug(_("Mounted disk is: %s"), mounted_disk)
+ if len(mounted_disk) > 0:
+ break
+ self._time.sleep(self._wait_between_attach_retry)
+ mounted_disk = self._conn.query(
+ "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
+ str(device_number) + "")
+ LOG.debug(_("Mounted disk is: %s"), mounted_disk)
+ if len(mounted_disk) == 0:
+ raise vmutils.HyperVException(
+ _('Unable to find a mounted disk for'
+ ' target_iqn: %s') % target_iqn)
+ return mounted_disk
+
+ def disconnect_volume(self, physical_drive_path):
+ #Get the session_id of the ISCSI connection
+ session_id = self._get_session_id_from_mounted_disk(
+ physical_drive_path)
+ #Logging out the target
+ self._volutils.execute_log_out(session_id)
+
+ def _get_session_id_from_mounted_disk(self, physical_drive_path):
+ drive_number = self._get_drive_number_from_disk_path(
+ physical_drive_path)
+ LOG.debug(_("Drive number to disconnect is: %s"), drive_number)
+ initiator_sessions = self._conn_wmi.query(
+ "SELECT * FROM MSiSCSIInitiator_SessionClass")
+ for initiator_session in initiator_sessions:
+ devices = initiator_session.Devices
+ for device in devices:
+ deviceNumber = str(device.DeviceNumber)
+ LOG.debug(_("DeviceNumber : %s"), deviceNumber)
+ if deviceNumber == drive_number:
+ return initiator_session.SessionId
+
+ def _get_drive_number_from_disk_path(self, disk_path):
+ LOG.debug(_("Disk path to parse: %s"), disk_path)
+ start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
+ LOG.debug(_("start_device_id: %s"), start_device_id)
+ end_device_id = disk_path.find('"', start_device_id + 1)
+ LOG.debug(_("end_device_id: %s"), end_device_id)
+ deviceID = disk_path[start_device_id + 1:end_device_id]
+ return deviceID[deviceID.find("\\") + 2:]
+
+ def get_default_root_device(self):
+ return self._default_root_device
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
new file mode 100644
index 000000000..018a4c278
--- /dev/null
+++ b/nova/virt/hyperv/volumeutils.py
@@ -0,0 +1,122 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Pedro Navarro Perez
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes,
+and storage repositories
+"""
+
+import subprocess
+import sys
+import time
+
+from nova import block_device
+from nova import flags
+from nova.openstack.common import log as logging
+from nova.virt import driver
+from nova.virt.hyperv import vmutils
+
+# Check needed for unit testing on Unix
+if sys.platform == 'win32':
+ import _winreg
+
+LOG = logging.getLogger(__name__)
+FLAGS = flags.FLAGS
+
+
+class VolumeUtils(object):
+ def execute(self, *args, **kwargs):
+ proc = subprocess.Popen(
+            ' '.join(args),
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ stdout_value, stderr_value = proc.communicate()
+ if stdout_value.find('The operation completed successfully') == -1:
+ raise vmutils.HyperVException(_('An error has occurred when '
+ 'calling the iscsi initiator: %s') % stdout_value)
+
+ def get_iscsi_initiator(self, cim_conn):
+ """Get iscsi initiator name for this machine"""
+
+ computer_system = cim_conn.Win32_ComputerSystem()[0]
+ hostname = computer_system.name
+ keypath = \
+ r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
+ try:
+ key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
+ _winreg.KEY_ALL_ACCESS)
+ temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
+ initiator_name = str(temp[0])
+ _winreg.CloseKey(key)
+ except Exception:
+ LOG.info(_("The ISCSI initiator name can't be found. "
+ "Choosing the default one"))
+ computer_system = cim_conn.Win32_ComputerSystem()[0]
+ initiator_name = "iqn.1991-05.com.microsoft:" + \
+ hostname.lower()
+ return {
+ 'ip': FLAGS.my_ip,
+ 'initiator': initiator_name,
+ }
+
+ def login_storage_target(self, target_lun, target_iqn, target_portal):
+ """Add target portal, list targets and logins to the target"""
+ separator = target_portal.find(':')
+ target_address = target_portal[:separator]
+ target_port = target_portal[separator + 1:]
+ #Adding target portal to iscsi initiator. Sending targets
+ self.execute('iscsicli.exe ' + 'AddTargetPortal ' +
+ target_address + ' ' + target_port +
+ ' * * * * * * * * * * * * *')
+ #Listing targets
+        self.execute('iscsicli.exe ' + 'ListTargets')
+ #Sending login
+ self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
+ #Waiting the disk to be mounted. Research this
+ time.sleep(FLAGS.hyperv_wait_between_attach_retry)
+
+ def logout_storage_target(self, _conn_wmi, target_iqn):
+ """ Logs out storage target through its session id """
+
+ sessions = _conn_wmi.query(
+ "SELECT * FROM MSiSCSIInitiator_SessionClass \
+ WHERE TargetName='" + target_iqn + "'")
+ for session in sessions:
+ self.execute_log_out(session.SessionId)
+
+ def execute_log_out(self, session_id):
+ """ Executes log out of the session described by its session ID """
+ self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
+
+ def volume_in_mapping(self, mount_device, block_device_info):
+ block_device_list = [block_device.strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(
+ block_device.strip_dev(swap['device_name']))
+ block_device_list += [block_device.strip_dev(
+ ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_dev(mount_device) in block_device_list
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index a53ed640f..8a924df56 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -133,6 +133,7 @@ class LibvirtConfigCapsGuest(LibvirtConfigObject):
self.arch = None
self.ostype = None
+ self.domtype = list()
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc)
@@ -142,6 +143,9 @@ class LibvirtConfigCapsGuest(LibvirtConfigObject):
self.ostype = c.text
elif c.tag == "arch":
self.arch = c.get("name")
+ for sc in c.getchildren():
+ if sc.tag == "domain":
+ self.domtype.append(sc.get("type"))
def format_dom(self):
caps = super(LibvirtConfigCapsGuest, self).format_dom()
@@ -150,6 +154,10 @@ class LibvirtConfigCapsGuest(LibvirtConfigObject):
caps.append(self._text_node("os_type", self.ostype))
if self.arch:
arch = etree.Element("arch", name=self.arch)
+ for dt in self.domtype:
+ dte = etree.Element("domain")
+ dte.set("type", dt)
+ arch.append(dte)
caps.append(arch)
return caps
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index bf3cec403..1e2706841 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -810,7 +810,8 @@ class LibvirtDriver(driver.ComputeDriver):
image_file)
@exception.wrap_exception()
- def reboot(self, instance, network_info, reboot_type='SOFT'):
+ def reboot(self, instance, network_info, reboot_type='SOFT',
+ block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
@@ -821,7 +822,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
- return self._hard_reboot(instance)
+ return self._hard_reboot(instance, block_device_info=block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
@@ -858,7 +859,7 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
return False
- def _hard_reboot(self, instance, xml=None):
+ def _hard_reboot(self, instance, xml=None, block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
@@ -871,17 +872,23 @@ class LibvirtDriver(driver.ComputeDriver):
existing domain.
"""
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mount_device = vol['mount_device'].rpartition("/")[2]
+ self.volume_driver_method('connect_volume',
+ connection_info,
+ mount_device)
+
virt_dom = self._lookup_by_name(instance['name'])
# NOTE(itoumsn): Use XML delived from the running instance.
if not xml:
xml = virt_dom.XMLDesc(0)
- # NOTE(dprince): reset was added in Libvirt 0.9.7
- if hasattr(virt_dom, 'reset'):
- virt_dom.reset(0)
- else:
- self._destroy(instance)
- self._create_domain(xml, virt_dom)
+ self._destroy(instance)
+ self._create_domain(xml, virt_dom)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
@@ -938,11 +945,13 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_domain(domain=dom)
@exception.wrap_exception()
- def resume_state_on_host_boot(self, context, instance, network_info):
+ def resume_state_on_host_boot(self, context, instance, network_info,
+ block_device_info=None):
"""resume guest state when a host is booted"""
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
- self._create_domain_and_network(xml, instance, network_info)
+ self._create_domain_and_network(xml, instance, network_info,
+ block_device_info)
@exception.wrap_exception()
def rescue(self, context, instance, network_info, image_meta,
@@ -1021,7 +1030,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
- self._create_domain_and_network(xml, instance, network_info)
+ self._create_domain_and_network(xml, instance, network_info,
+ block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
@@ -1834,8 +1844,20 @@ class LibvirtDriver(driver.ComputeDriver):
domain.createWithFlags(launch_flags)
return domain
- def _create_domain_and_network(self, xml, instance, network_info):
+ def _create_domain_and_network(self, xml, instance, network_info,
+ block_device_info=None):
+
"""Do required network setup and create domain."""
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mount_device = vol['mount_device'].rpartition("/")[2]
+ self.volume_driver_method('connect_volume',
+ connection_info,
+ mount_device)
+
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
@@ -2055,6 +2077,24 @@ class LibvirtDriver(driver.ComputeDriver):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
+ def get_instance_capabilities(self):
+ """Get hypervisor instance capabilities
+
+ Returns a list of tuples that describe instances the
+ hypervisor is capable of hosting. Each tuple consists
+ of the triplet (arch, hypervisor_type, vm_mode).
+
+ :returns: List of tuples describing instance capabilities
+ """
+ caps = self.get_host_capabilities()
+ instance_caps = list()
+ for g in caps.guests:
+ for dt in g.domtype:
+ instance_cap = (g.arch, dt, g.ostype)
+ instance_caps.append(instance_cap)
+
+ return instance_caps
+
def get_cpu_info(self):
"""Get cpuinfo information.
@@ -2991,6 +3031,8 @@ class HostState(object):
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()
data["hypervisor_hostname"] = self.connection.get_hypervisor_hostname()
+ data["supported_instances"] = \
+ self.connection.get_instance_capabilities()
self._stats = data
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 5adfe4cae..997628405 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,7 +22,6 @@
import errno
import hashlib
import os
-import random
import re
from nova import exception
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 66fd8929e..1b3eba415 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -138,7 +138,8 @@ class PowerVMDriver(driver.ComputeDriver):
"""Destroy (shutdown and delete) the specified instance."""
self._powervm.destroy(instance['name'])
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 112de9a93..947bd3422 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -135,7 +135,8 @@ class VMWareESXDriver(driver.ComputeDriver):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name)
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 2f472fc7b..3709c13af 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -197,7 +197,8 @@ class XenAPIDriver(driver.ComputeDriver):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(context, instance, image_id)
- def reboot(self, instance, network_info, reboot_type):
+ def reboot(self, instance, network_info, reboot_type,
+ block_device_info=None):
"""Reboot VM instance"""
self._vmops.reboot(instance, reboot_type)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 10cca68f4..41e1f6dd6 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -42,7 +42,6 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
-from nova.virt import driver
from nova.virt.xenapi import agent
from nova.virt.xenapi import firewall
from nova.virt.xenapi import pool_states
@@ -303,6 +302,18 @@ class VMOps(object):
undo_mgr.undo_with(undo_create_vm)
return vm_ref
+ if rescue:
+ # NOTE(johannes): Attach root disk to rescue VM now, before
+ # booting the VM, since we can't hotplug block devices
+ # on non-PV guests
+ @step
+ def attach_root_disk_step(undo_mgr, vm_ref):
+ orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
+ vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
+
+ vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
+ DEVICE_RESCUE, bootable=False)
+
@step
def prepare_security_group_filters_step(undo_mgr):
try:
@@ -345,6 +356,9 @@ class VMOps(object):
vm_ref = create_vm_step(undo_mgr, vdis, kernel_file, ramdisk_file)
prepare_security_group_filters_step(undo_mgr)
+ if rescue:
+ attach_root_disk_step(undo_mgr, vm_ref)
+
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
@@ -1054,13 +1068,6 @@ class VMOps(object):
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
- rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
- vdi_ref = self._find_root_vdi_ref(vm_ref)
-
- rescue_vbd_ref = vm_utils.create_vbd(self._session, rescue_vm_ref,
- vdi_ref, DEVICE_RESCUE,
- bootable=False)
- self._session.call_xenapi('VBD.plug', rescue_vbd_ref)
def unrescue(self, instance):
"""Unrescue the specified instance.
diff --git a/nova/volume/netapp.py b/nova/volume/netapp.py
index 380f2fbc4..6dd5c0e31 100644
--- a/nova/volume/netapp.py
+++ b/nova/volume/netapp.py
@@ -23,7 +23,6 @@ ONTAP 7-mode storage systems with installed iSCSI licenses.
"""
-import string
import time
import suds
diff --git a/nova/volume/storwize_svc.py b/nova/volume/storwize_svc.py
index f87500f9b..d1958a702 100644
--- a/nova/volume/storwize_svc.py
+++ b/nova/volume/storwize_svc.py
@@ -49,7 +49,6 @@ from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova import utils
from nova.volume import san
LOG = logging.getLogger(__name__)
diff --git a/run_tests.sh b/run_tests.sh
index 2784bb374..0b7f52a55 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -67,6 +67,10 @@ export NOSE_OPENSTACK_YELLOW=0.025
export NOSE_OPENSTACK_SHOW_ELAPSED=1
export NOSE_OPENSTACK_STDOUT=1
+export LANG=en_US.UTF-8
+export LANGUAGE=en_US:en
+export LC_ALL=C
+
for arg in "$@"; do
process_option $arg
done
@@ -95,8 +99,6 @@ function run_tests {
cat run_tests.log
fi
fi
- # cleanup locks - not really needed, but stops pollution of the source tree
- rm -f nova-ensure_bridge nova-ensure_vlan nova-iptables nova-testlock1 nova-testlock2
return $RESULT
}
diff --git a/tools/hacking.py b/tools/hacking.py
index 4eb2141f7..88d074794 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -47,7 +47,7 @@ logging.disable('LOG')
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
-VERBOSE_MISSING_IMPORT = False
+VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
@@ -218,9 +218,10 @@ def nova_import_module_only(logical_line):
else:
name = logical_line.split()[1]
if name not in _missingImport:
- if VERBOSE_MISSING_IMPORT:
- print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
- (name, exc))
+ if VERBOSE_MISSING_IMPORT != 'False':
+ print >> sys.stderr, ("ERROR: import '%s' in %s "
+ "failed: %s" %
+ (name, pep8.current_file, exc))
_missingImport.add(name)
added = False
sys.path.pop()
diff --git a/tox.ini b/tox.ini
index b95decf9e..4957ce39a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,6 +9,10 @@ setenv = VIRTUAL_ENV={envdir}
NOSE_OPENSTACK_YELLOW=0.025
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
+ LANG=en_US.UTF-8
+ LANGUAGE=en_US:en
+ LC_ALL=C
+
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
commands = nosetests {posargs}