summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--HACKING.rst1
-rwxr-xr-xbin/nova-all1
-rwxr-xr-xbin/nova-api1
-rwxr-xr-xbin/nova-api-ec21
-rwxr-xr-xbin/nova-api-metadata1
-rwxr-xr-xbin/nova-api-os-compute1
-rwxr-xr-xbin/nova-cert1
-rwxr-xr-xbin/nova-clear-rabbit-queues3
-rwxr-xr-xbin/nova-compute1
-rwxr-xr-xbin/nova-conductor51
-rwxr-xr-xbin/nova-console1
-rwxr-xr-xbin/nova-consoleauth3
-rwxr-xr-xbin/nova-dhcpbridge3
-rwxr-xr-xbin/nova-manage142
-rwxr-xr-xbin/nova-network1
-rwxr-xr-xbin/nova-novncproxy1
-rwxr-xr-xbin/nova-objectstore1
-rwxr-xr-xbin/nova-rpc-zmq-receiver1
-rwxr-xr-xbin/nova-scheduler1
-rwxr-xr-xbin/nova-xvpvncproxy1
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json16
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml6
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json16
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml6
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json10
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml5
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.json16
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.xml6
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.json17
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.xml6
-rw-r--r--doc/api_samples/limit-get-resp.json2
-rw-r--r--doc/api_samples/limit-get-resp.xml2
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-req.json5
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-req.xml2
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-resp.json6
-rw-r--r--doc/api_samples/os-consoles/get-vnc-console-post-resp.xml5
-rw-r--r--doc/api_samples/os-consoles/server-post-req.json16
-rw-r--r--doc/api_samples/os-consoles/server-post-req.xml19
-rw-r--r--doc/api_samples/os-consoles/server-post-resp.json16
-rw-r--r--doc/api_samples/os-consoles/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-req.json16
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-req.xml19
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-resp.json16
-rw-r--r--doc/api_samples/os-server-start-stop/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-server-start-stop/server_start_stop.xml2
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.json8
-rw-r--r--doc/api_samples/os-used-limits/usedlimits-get-resp.xml6
-rw-r--r--doc/source/conf.py4
-rw-r--r--doc/source/man/nova-conductor.rst45
-rw-r--r--etc/nova/api-paste.ini2
-rw-r--r--etc/nova/nova.conf.sample489
-rw-r--r--etc/nova/policy.json4
-rw-r--r--nova/api/auth.py1
-rw-r--r--nova/api/ec2/__init__.py1
-rw-r--r--nova/api/ec2/apirequest.py2
-rw-r--r--nova/api/ec2/cloud.py36
-rw-r--r--nova/api/ec2/ec2utils.py6
-rw-r--r--nova/api/ec2/faults.py1
-rw-r--r--nova/api/manager.py1
-rw-r--r--nova/api/metadata/base.py8
-rw-r--r--nova/api/metadata/handler.py1
-rw-r--r--nova/api/openstack/__init__.py13
-rw-r--r--nova/api/openstack/auth.py1
-rw-r--r--nova/api/openstack/common.py1
-rw-r--r--nova/api/openstack/compute/__init__.py1
-rw-r--r--nova/api/openstack/compute/contrib/__init__.py1
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py2
-rw-r--r--nova/api/openstack/compute/contrib/certificates.py2
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe.py1
-rw-r--r--nova/api/openstack/compute/contrib/config_drive.py2
-rw-r--r--nova/api/openstack/compute/contrib/deferred_delete.py3
-rw-r--r--nova/api/openstack/compute/contrib/extended_server_attributes.py2
-rw-r--r--nova/api/openstack/compute/contrib/extended_status.py2
-rw-r--r--nova/api/openstack/compute/contrib/fixed_ips.py98
-rw-r--r--nova/api/openstack/compute/contrib/fping.py162
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py2
-rw-r--r--nova/api/openstack/compute/contrib/instance_usage_audit_log.py1
-rw-r--r--nova/api/openstack/compute/contrib/networks.py2
-rw-r--r--nova/api/openstack/compute/contrib/rescue.py1
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py2
-rw-r--r--nova/api/openstack/compute/contrib/services.py141
-rw-r--r--nova/api/openstack/compute/contrib/simple_tenant_usage.py2
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py24
-rw-r--r--nova/api/openstack/compute/extensions.py1
-rw-r--r--nova/api/openstack/compute/image_metadata.py2
-rw-r--r--nova/api/openstack/compute/images.py2
-rw-r--r--nova/api/openstack/compute/ips.py2
-rw-r--r--nova/api/openstack/compute/limits.py1
-rw-r--r--nova/api/openstack/compute/servers.py1
-rw-r--r--nova/api/openstack/compute/views/addresses.py2
-rw-r--r--nova/api/openstack/compute/views/images.py1
-rw-r--r--nova/api/openstack/compute/views/limits.py2
-rw-r--r--nova/api/openstack/compute/views/versions.py1
-rw-r--r--nova/api/openstack/extensions.py2
-rw-r--r--nova/api/openstack/wsgi.py8
-rw-r--r--nova/api/sizelimit.py1
-rw-r--r--nova/block_device.py1
-rw-r--r--nova/cert/manager.py2
-rw-r--r--nova/cert/rpcapi.py1
-rw-r--r--nova/cloudpipe/pipelib.py1
-rw-r--r--nova/compute/__init__.py4
-rw-r--r--nova/compute/api.py42
-rw-r--r--nova/compute/claims.py36
-rw-r--r--nova/compute/instance_types.py12
-rw-r--r--nova/compute/manager.py288
-rw-r--r--nova/compute/resource_tracker.py248
-rw-r--r--nova/compute/rpcapi.py31
-rw-r--r--nova/compute/stats.py4
-rw-r--r--nova/compute/utils.py12
-rw-r--r--nova/conductor/__init__.py25
-rw-r--r--nova/conductor/api.py61
-rw-r--r--nova/conductor/manager.py51
-rw-r--r--nova/conductor/rpcapi.py43
-rw-r--r--nova/config.py353
-rw-r--r--nova/console/api.py7
-rw-r--r--nova/console/manager.py12
-rw-r--r--nova/console/rpcapi.py7
-rw-r--r--nova/console/vmrc.py8
-rw-r--r--nova/console/vmrc_manager.py16
-rw-r--r--nova/console/xvp.py26
-rw-r--r--nova/consoleauth/__init__.py1
-rw-r--r--nova/consoleauth/manager.py1
-rw-r--r--nova/consoleauth/rpcapi.py1
-rw-r--r--nova/context.py4
-rw-r--r--nova/crypto.py1
-rw-r--r--nova/db/api.py382
-rw-r--r--nova/db/base.py9
-rw-r--r--nova/db/migration.py2
-rw-r--r--nova/db/sqlalchemy/api.py1032
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py63
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py42
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py58
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql97
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql97
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py117
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py237
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql226
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql226
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py205
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py73
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py54
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py54
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py94
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql133
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql132
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py145
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py106
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql64
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql64
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py80
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py43
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py43
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py70
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql1
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py62
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py63
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql53
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql52
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py189
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py72
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py69
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py108
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql85
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql85
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py108
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py94
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py98
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py71
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py59
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py59
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py76
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py68
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py44
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py96
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py57
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py107
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py67
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py48
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/082_essex.py)748
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py55
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py)29
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py)16
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py)21
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql239
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql239
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py)16
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py61
-rw-r--r--nova/db/sqlalchemy/migration.py4
-rw-r--r--nova/db/sqlalchemy/models.py96
-rw-r--r--nova/db/sqlalchemy/session.py27
-rw-r--r--nova/exception.py119
-rw-r--r--nova/filters.py53
-rw-r--r--nova/flags.py380
-rw-r--r--nova/image/glance.py1
-rw-r--r--nova/image/s3.py1
-rw-r--r--nova/ipv6/api.py1
-rw-r--r--nova/loadables.py116
-rw-r--r--nova/locale/nova.pot3115
-rw-r--r--nova/manager.py11
-rw-r--r--nova/network/__init__.py1
-rw-r--r--nova/network/api.py9
-rw-r--r--nova/network/l3.py2
-rw-r--r--nova/network/ldapdns.py1
-rw-r--r--nova/network/linux_net.py36
-rw-r--r--nova/network/manager.py48
-rw-r--r--nova/network/minidns.py1
-rw-r--r--nova/network/quantum/nova_ipam_lib.py2
-rw-r--r--nova/network/quantumv2/__init__.py1
-rw-r--r--nova/network/quantumv2/api.py1
-rw-r--r--nova/network/rpcapi.py14
-rw-r--r--nova/notifications.py1
-rw-r--r--nova/objectstore/s3server.py1
-rw-r--r--nova/openstack/common/cfg.py6
-rw-r--r--nova/openstack/common/eventlet_backdoor.py (renamed from nova/common/eventlet_backdoor.py)39
-rw-r--r--nova/openstack/common/gettextutils.py2
-rw-r--r--nova/openstack/common/lockutils.py1
-rw-r--r--nova/openstack/common/notifier/rabbit_notifier.py31
-rw-r--r--nova/openstack/common/notifier/rpc_notifier.py46
-rw-r--r--nova/openstack/common/rpc/__init__.py2
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py19
-rw-r--r--nova/openstack/common/rpc/impl_qpid.py88
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py2
-rw-r--r--nova/openstack/common/rpc/service.py70
-rw-r--r--nova/openstack/common/setup.py16
-rw-r--r--nova/openstack/common/uuidutils.py4
-rw-r--r--nova/policy.py1
-rw-r--r--nova/quota.py19
-rw-r--r--nova/scheduler/baremetal_host_manager.py71
-rw-r--r--nova/scheduler/chance.py1
-rw-r--r--nova/scheduler/driver.py10
-rw-r--r--nova/scheduler/filter_scheduler.py120
-rw-r--r--nova/scheduler/filters/__init__.py76
-rw-r--r--nova/scheduler/filters/compute_filter.py4
-rw-r--r--nova/scheduler/filters/core_filter.py3
-rw-r--r--nova/scheduler/filters/disk_filter.py1
-rw-r--r--nova/scheduler/filters/image_props_filter.py2
-rw-r--r--nova/scheduler/filters/io_ops_filter.py1
-rw-r--r--nova/scheduler/filters/isolated_hosts_filter.py1
-rw-r--r--nova/scheduler/filters/num_instances_filter.py1
-rw-r--r--nova/scheduler/filters/ram_filter.py1
-rw-r--r--nova/scheduler/filters/trusted_filter.py1
-rw-r--r--nova/scheduler/host_manager.py164
-rw-r--r--nova/scheduler/least_cost.py118
-rw-r--r--nova/scheduler/manager.py1
-rw-r--r--nova/scheduler/multi.py1
-rw-r--r--nova/scheduler/rpcapi.py1
-rw-r--r--nova/scheduler/scheduler_options.py1
-rw-r--r--nova/scheduler/weights/__init__.py61
-rw-r--r--nova/scheduler/weights/least_cost.py126
-rw-r--r--nova/scheduler/weights/ram.py46
-rw-r--r--nova/service.py14
-rw-r--r--nova/test.py99
-rw-r--r--nova/tests/__init__.py46
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py58
-rw-r--r--nova/tests/api/ec2/test_cloud.py73
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py4
-rw-r--r--nova/tests/api/ec2/test_middleware.py20
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions.py14
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_aggregates.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe.py10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_createserverext.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_disk_config.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_status.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fixed_ips.py164
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_swap.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ips.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fping.py94
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py3
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py11
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quota_classes.py26
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quotas.py25
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_rescue.py6
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_start_stop.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py198
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_snapshots.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py12
-rw-r--r--nova/tests/api/openstack/compute/test_consoles.py14
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py46
-rw-r--r--nova/tests/api/openstack/compute/test_flavors.py15
-rw-r--r--nova/tests/api/openstack/compute/test_image_metadata.py9
-rw-r--r--nova/tests/api/openstack/compute/test_images.py18
-rw-r--r--nova/tests/api/openstack/compute/test_limits.py9
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py17
-rw-r--r--nova/tests/api/openstack/compute/test_server_metadata.py17
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py86
-rw-r--r--nova/tests/api/openstack/compute/test_versions.py12
-rw-r--r--nova/tests/api/openstack/fakes.py7
-rw-r--r--nova/tests/api/test_sizelimit.py6
-rw-r--r--nova/tests/baremetal/db/__init__.py14
-rw-r--r--nova/tests/baremetal/db/base.py51
-rw-r--r--nova/tests/baremetal/db/test_bm_interface.py47
-rw-r--r--nova/tests/baremetal/db/test_bm_node.py140
-rw-r--r--nova/tests/baremetal/db/test_bm_pxe_ip.py93
-rw-r--r--nova/tests/baremetal/db/utils.py81
-rw-r--r--nova/tests/baremetal/test_proxy_bare_metal.py269
-rw-r--r--nova/tests/baremetal/test_tilera.py84
-rw-r--r--nova/tests/cert/test_rpcapi.py7
-rw-r--r--nova/tests/compute/test_claims.py53
-rw-r--r--nova/tests/compute/test_compute.py174
-rw-r--r--nova/tests/compute/test_compute_utils.py7
-rw-r--r--nova/tests/compute/test_multiple_nodes.py99
-rw-r--r--nova/tests/compute/test_resource_tracker.py412
-rw-r--r--nova/tests/compute/test_rpcapi.py17
-rw-r--r--nova/tests/conductor/__init__.py (renamed from nova/tests/baremetal/__init__.py)0
-rw-r--r--nova/tests/conductor/test_conductor.py133
-rw-r--r--nova/tests/console/test_console.py4
-rw-r--r--nova/tests/console/test_rpcapi.py7
-rw-r--r--nova/tests/consoleauth/test_consoleauth.py4
-rw-r--r--nova/tests/consoleauth/test_rpcapi.py7
-rw-r--r--nova/tests/db/fakes.py11
-rw-r--r--nova/tests/declare_flags.py6
-rw-r--r--nova/tests/fake_flags.py3
-rw-r--r--nova/tests/fake_loadables/__init__.py27
-rw-r--r--nova/tests/fake_loadables/fake_loadable1.py44
-rw-r--r--nova/tests/fake_loadables/fake_loadable2.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py)38
-rw-r--r--nova/tests/fake_network.py10
-rw-r--r--nova/tests/fake_volume.py8
-rw-r--r--nova/tests/hyperv/README.rst83
-rw-r--r--nova/tests/hyperv/basetestcase.py4
-rw-r--r--nova/tests/hyperv/db_fakes.py42
-rw-r--r--nova/tests/hyperv/hypervutils.py13
-rw-r--r--nova/tests/hyperv/mockproxy.py47
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gzbin0 -> 278 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gzbin0 -> 606 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gzbin722 -> 722 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gzbin289 -> 300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gzbin2797 -> 2806 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gzbin276 -> 445 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gzbin674 -> 756 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gzbin0 -> 308 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gzbin0 -> 634 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gzbin755 -> 753 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gzbin320 -> 331 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gzbin591 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gzbin290 -> 458 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gzbin658 -> 743 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gzbin22780 -> 21875 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gzbin28844 -> 28365 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gzbin0 -> 281 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gzbin0 -> 607 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gzbin292 -> 303 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gzbin2800 -> 2810 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gzbin275 -> 446 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gzbin592 -> 675 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gzbin19845 -> 19114 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gzbin0 -> 272 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gzbin748 -> 750 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gzbin253 -> 421 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gzbin627 -> 705 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gzbin24040 -> 23221 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gzbin0 -> 278 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gzbin0 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gzbin723 -> 724 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gzbin289 -> 300 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gzbin2798 -> 2806 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gzbin275 -> 446 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gzbin671 -> 756 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gzbin29537 -> 29078 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gzbin270 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gzbin0 -> 423 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gzbin298 -> 309 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gzbin1013 -> 980 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gzbin0 -> 415 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gzbin888 -> 845 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gzbin0 -> 598 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gzbin717 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gzbin284 -> 295 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gzbin254 -> 422 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gzbin626 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gzbin23400 -> 22457 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gzbin1300 -> 852 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gzbin0 -> 279 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gzbin0 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gzbin603 -> 602 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gzbin290 -> 301 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gzbin260 -> 435 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gzbin631 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gzbin0 -> 298 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gzbin0 -> 625 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gzbin621 -> 622 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gzbin310 -> 321 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gzbin280 -> 448 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gzbin649 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gzbin23876 -> 22937 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gzbin26172 -> 27619 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gzbin0 -> 285 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gzbin0 -> 612 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gzbin728 -> 731 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gzbin296 -> 307 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gzbin266 -> 431 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gzbin638 -> 720 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gzbin23490 -> 22746 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gzbin0 -> 271 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gzbin0 -> 597 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gzbin716 -> 715 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gzbin281 -> 292 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gzbin251 -> 416 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gzbin624 -> 707 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gzbin23350 -> 22561 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gzbin0 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gzbin0 -> 620 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gzbin740 -> 738 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gzbin305 -> 316 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gzbin275 -> 440 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gzbin646 -> 729 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gzbin23323 -> 22603 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gzbin719 -> 720 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gzbin285 -> 296 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gzbin255 -> 419 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gzbin625 -> 711 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gzbin23258 -> 22438 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gzbin0 -> 288 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gzbin0 -> 614 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gzbin734 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gzbin300 -> 311 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gzbin270 -> 435 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gzbin640 -> 723 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gzbin23305 -> 22436 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gzbin718 -> 718 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gzbin284 -> 295 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gzbin254 -> 419 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gzbin626 -> 711 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gzbin23962 -> 22901 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gzbin0 -> 343 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gzbin536 -> 536 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gzbin304 -> 315 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gzbin273 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gzbin335 -> 385 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gzbin1382 -> 1419 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gzbin0 -> 345 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gzbin307 -> 318 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gzbin337 -> 388 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gzbin849 -> 851 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gzbin0 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gzbin0 -> 598 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gzbin717 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gzbin282 -> 293 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gzbin252 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gzbin623 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gzbin23931 -> 22707 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gzbin0 -> 287 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gzbin0 -> 614 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gzbin733 -> 732 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gzbin298 -> 309 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gzbin268 -> 434 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gzbin640 -> 724 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gzbin23341 -> 22446 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gzbin0 -> 270 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gzbin0 -> 597 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gzbin716 -> 716 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gzbin282 -> 293 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gzbin252 -> 417 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gzbin623 -> 705 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gzbin24291 -> 23012 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gzbin0 -> 273 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gzbin0 -> 600 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gzbin1012 -> 1012 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gzbin416 -> 439 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gzbin254 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gzbin667 -> 750 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gzbin0 -> 293 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gzbin0 -> 618 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gzbin1033 -> 1034 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gzbin437 -> 459 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gzbin274 -> 439 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gzbin688 -> 772 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gzbin24794 -> 23717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gzbin24505 -> 23697 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gzbin0 -> 289 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gzbin0 -> 827 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gzbin0 -> 888 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gzbin0 -> 311 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gzbin0 -> 433 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gzbin0 -> 811 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gzbin0 -> 30237 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gzbin0 -> 493 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gzbin0 -> 819 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gzbin0 -> 914 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gzbin0 -> 305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gzbin0 -> 427 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gzbin0 -> 804 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gzbin0 -> 29742 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gzbin0 -> 279 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gzbin0 -> 607 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gzbin724 -> 726 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gzbin291 -> 302 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gzbin261 -> 425 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gzbin631 -> 715 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gzbin24716 -> 23911 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gzbin0 -> 286 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gzbin0 -> 615 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gzbin0 -> 731 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gzbin0 -> 308 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gzbin0 -> 431 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gzbin0 -> 722 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gzbin0 -> 23033 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gzbin0 -> 282 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gzbin0 -> 609 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gzbin607 -> 605 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gzbin294 -> 305 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gzbin264 -> 428 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gzbin635 -> 719 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gzbin24420 -> 23460 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gzbin0 -> 291 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gzbin737 -> 736 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gzbin302 -> 313 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gzbin271 -> 436 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gzbin558 -> 642 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gzbin17307 -> 17429 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gzbin0 -> 290 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gzbin0 -> 616 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gzbin734 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gzbin301 -> 312 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gzbin271 -> 437 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gzbin643 -> 728 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gzbin24133 -> 22775 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gzbin0 -> 272 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gzbin0 -> 599 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gzbin717 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gzbin253 -> 418 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gzbin623 -> 709 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gzbin23864 -> 22663 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gzbin0 -> 288 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gzbin0 -> 615 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gzbin735 -> 734 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gzbin299 -> 310 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gzbin269 -> 433 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gzbin640 -> 723 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gzbin23690 -> 22387 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gzbin0 -> 271 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gzbin0 -> 598 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gzbin717 -> 717 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gzbin283 -> 294 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gzbin253 -> 419 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gzbin626 -> 710 bytes
-rw-r--r--nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gzbin24099 -> 22805 bytes
-rw-r--r--nova/tests/image/fake.py34
-rw-r--r--nova/tests/image/test_glance.py11
-rw-r--r--nova/tests/integrated/api_samples/README.rst2
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl24
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/limit-get-resp.json.tpl15
-rw-r--r--nova/tests/integrated/api_samples/limit-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/integrated_helpers.py4
-rw-r--r--nova/tests/integrated/test_api_samples.py44
-rw-r--r--nova/tests/integrated/test_extensions.py7
-rw-r--r--nova/tests/integrated/test_multiprocess_api.py3
-rw-r--r--nova/tests/integrated/test_servers.py4
-rw-r--r--nova/tests/matchers.py196
-rw-r--r--nova/tests/network/test_api.py70
-rw-r--r--nova/tests/network/test_linux_net.py11
-rw-r--r--nova/tests/network/test_manager.py48
-rw-r--r--nova/tests/network/test_quantumv2.py30
-rw-r--r--nova/tests/network/test_rpcapi.py12
-rw-r--r--nova/tests/policy.json4
-rw-r--r--nova/tests/runtime_flags.py6
-rw-r--r--nova/tests/scheduler/fakes.py26
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py76
-rw-r--r--nova/tests/scheduler/test_host_filters.py223
-rw-r--r--nova/tests/scheduler/test_host_manager.py328
-rw-r--r--nova/tests/scheduler/test_least_cost.py114
-rw-r--r--nova/tests/scheduler/test_rpcapi.py7
-rw-r--r--nova/tests/scheduler/test_scheduler.py11
-rw-r--r--nova/tests/scheduler/test_weights.py117
-rw-r--r--nova/tests/test_api.py12
-rw-r--r--nova/tests/test_bdm.py3
-rw-r--r--nova/tests/test_configdrive2.py5
-rw-r--r--nova/tests/test_crypto.py4
-rw-r--r--nova/tests/test_db_api.py323
-rw-r--r--nova/tests/test_filters.py125
-rw-r--r--nova/tests/test_flags.py44
-rw-r--r--nova/tests/test_hypervapi.py68
-rw-r--r--nova/tests/test_image_utils.py83
-rw-r--r--nova/tests/test_imagebackend.py8
-rw-r--r--nova/tests/test_imagecache.py35
-rw-r--r--nova/tests/test_instance_types.py3
-rw-r--r--nova/tests/test_libvirt.py250
-rw-r--r--nova/tests/test_libvirt_config.py6
-rw-r--r--nova/tests/test_libvirt_utils.py38
-rw-r--r--nova/tests/test_libvirt_vif.py61
-rw-r--r--nova/tests/test_loadables.py113
-rw-r--r--nova/tests/test_matchers.py144
-rw-r--r--nova/tests/test_metadata.py6
-rw-r--r--nova/tests/test_migrations.py200
-rw-r--r--nova/tests/test_notifications.py3
-rw-r--r--nova/tests/test_nova_manage.py4
-rw-r--r--nova/tests/test_nova_rootwrap.py51
-rw-r--r--nova/tests/test_objectstore.py19
-rw-r--r--nova/tests/test_pipelib.py9
-rw-r--r--nova/tests/test_policy.py5
-rw-r--r--nova/tests/test_powervm.py30
-rw-r--r--nova/tests/test_quota.py95
-rw-r--r--nova/tests/test_service.py6
-rw-r--r--nova/tests/test_utils.py48
-rw-r--r--nova/tests/test_virt.py4
-rw-r--r--nova/tests/test_vmwareapi.py5
-rw-r--r--nova/tests/test_xenapi.py127
-rw-r--r--nova/tests/utils.py6
-rw-r--r--nova/tests/vmwareapi/db_fakes.py3
-rw-r--r--nova/tests/xenapi/test_vm_utils.py49
-rw-r--r--nova/utils.py42
-rw-r--r--nova/virt/baremetal/db/__init__.py (renamed from nova/virt/baremetal/__init__.py)8
-rw-r--r--nova/virt/baremetal/db/api.py175
-rw-r--r--nova/virt/baremetal/db/migration.py38
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/api.py351
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg20
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py124
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py14
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migration.py114
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/models.py80
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/session.py58
-rw-r--r--nova/virt/baremetal/doc/README.rst69
-rw-r--r--nova/virt/baremetal/dom.py266
-rw-r--r--nova/virt/baremetal/driver.py742
-rw-r--r--nova/virt/baremetal/fake.py157
-rw-r--r--nova/virt/baremetal/nodes.py42
-rw-r--r--nova/virt/baremetal/tilera.py364
-rw-r--r--nova/virt/configdrive.py24
-rw-r--r--nova/virt/disk/api.py25
-rw-r--r--nova/virt/disk/mount/__init__.py19
-rw-r--r--nova/virt/disk/mount/api.py (renamed from nova/virt/disk/mount.py)0
-rw-r--r--nova/virt/disk/mount/guestfs.py (renamed from nova/virt/disk/guestfs.py)4
-rw-r--r--nova/virt/disk/mount/loop.py (renamed from nova/virt/disk/loop.py)4
-rw-r--r--nova/virt/disk/mount/nbd.py (renamed from nova/virt/disk/nbd.py)14
-rw-r--r--nova/virt/driver.py22
-rw-r--r--nova/virt/fake.py80
-rw-r--r--nova/virt/firewall.py50
-rw-r--r--nova/virt/hyperv/constants.py3
-rw-r--r--nova/virt/hyperv/driver.py6
-rw-r--r--nova/virt/hyperv/hostops.py50
-rw-r--r--nova/virt/hyperv/livemigrationops.py6
-rw-r--r--nova/virt/hyperv/snapshotops.py6
-rw-r--r--nova/virt/hyperv/vmops.py180
-rw-r--r--nova/virt/hyperv/vmutils.py22
-rw-r--r--nova/virt/hyperv/volumeops.py12
-rw-r--r--nova/virt/hyperv/volumeutils.py8
-rw-r--r--nova/virt/images.py8
-rw-r--r--nova/virt/libvirt/config.py4
-rw-r--r--nova/virt/libvirt/driver.py256
-rw-r--r--nova/virt/libvirt/firewall.py20
-rw-r--r--nova/virt/libvirt/imagebackend.py28
-rw-r--r--nova/virt/libvirt/imagecache.py24
-rw-r--r--nova/virt/libvirt/utils.py18
-rw-r--r--nova/virt/libvirt/vif.py53
-rw-r--r--nova/virt/libvirt/volume.py29
-rw-r--r--nova/virt/libvirt/volume_nfs.py8
-rw-r--r--nova/virt/netutils.py7
-rw-r--r--nova/virt/powervm/driver.py8
-rw-r--r--nova/virt/powervm/operator.py26
-rw-r--r--nova/virt/virtapi.py69
-rw-r--r--nova/virt/vmwareapi/driver.py26
-rw-r--r--nova/virt/vmwareapi/read_write_util.py4
-rw-r--r--nova/virt/vmwareapi/vif.py8
-rw-r--r--nova/virt/vmwareapi/vim.py10
-rw-r--r--nova/virt/vmwareapi/vmops.py10
-rw-r--r--nova/virt/xenapi/agent.py18
-rw-r--r--nova/virt/xenapi/driver.py46
-rw-r--r--nova/virt/xenapi/firewall.py14
-rw-r--r--nova/virt/xenapi/host.py26
-rw-r--r--nova/virt/xenapi/pool.py51
-rw-r--r--nova/virt/xenapi/pool_states.py4
-rw-r--r--nova/virt/xenapi/vif.py10
-rw-r--r--nova/virt/xenapi/vm_utils.py138
-rw-r--r--nova/virt/xenapi/vmops.py58
-rw-r--r--nova/virt/xenapi/volume_utils.py16
-rw-r--r--nova/vnc/__init__.py1
-rw-r--r--nova/vnc/xvp_proxy.py1
-rw-r--r--nova/volume/__init__.py1
-rw-r--r--nova/volume/cinder.py1
-rw-r--r--nova/volume/driver.py954
-rw-r--r--nova/volume/iscsi.py235
-rw-r--r--nova/weights.py71
-rw-r--r--nova/wsgi.py1
-rw-r--r--openstack-common.conf2
-rw-r--r--smoketests/test_sysadmin.py18
-rwxr-xr-xtools/hacking.py7
-rw-r--r--tools/test-requires1
-rw-r--r--tools/xenserver/destroy_cached_images.py2
-rwxr-xr-xtools/xenserver/vm_vdi_cleaner.py1
-rw-r--r--tox.ini4
729 files changed, 12250 insertions, 16534 deletions
diff --git a/.gitignore b/.gitignore
index 51ab4183a..3b1ca1066 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
*.swo
*.swp
*.sqlite
+*~
.autogenerated
.coverage
.nova-venv
diff --git a/HACKING.rst b/HACKING.rst
index 52dc38bf8..bf8274cea 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -33,6 +33,7 @@ Imports
- Do not import objects, only modules (*)
- Do not import more than one module per line (*)
- Do not make relative imports
+- Do not make new nova.db imports in nova/virt/*
- Order your imports by the full module path
- Organize your imports according to the following template
diff --git a/bin/nova-all b/bin/nova-all
index 531116d69..dbbd50887 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -41,7 +41,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.objectstore import s3server
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-api b/bin/nova-api
index 776152e43..5bb7f31b7 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -37,7 +37,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api-ec2 b/bin/nova-api-ec2
index f165b5ce9..c7b08845d 100755
--- a/bin/nova-api-ec2
+++ b/bin/nova-api-ec2
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api-metadata b/bin/nova-api-metadata
index f50e5ce84..e7cac260d 100755
--- a/bin/nova-api-metadata
+++ b/bin/nova-api-metadata
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api-os-compute b/bin/nova-api-os-compute
index 5cf5f6910..02f16a04a 100755
--- a/bin/nova-api-os-compute
+++ b/bin/nova-api-os-compute
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-cert b/bin/nova-cert
index 441bda9e5..ffda0e547 100755
--- a/bin/nova-cert
+++ b/bin/nova-cert
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index be1d98e3e..90e9128f3 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -40,10 +40,9 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import config
from nova import context
from nova import exception
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
diff --git a/bin/nova-compute b/bin/nova-compute
index f850e1b8c..78f4efd20 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -42,7 +42,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-conductor b/bin/nova-conductor
new file mode 100755
index 000000000..2dba8ee1b
--- /dev/null
+++ b/bin/nova-conductor
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Starter script for Nova Conductor."""
+
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova import config
+from nova.openstack.common import log as logging
+from nova import service
+from nova import utils
+
+CONF = config.CONF
+CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
+
+if __name__ == '__main__':
+ config.parse_args(sys.argv)
+ logging.setup("nova")
+ utils.monkey_patch()
+ server = service.Service.create(binary='nova-conductor',
+ topic=CONF.conductor.topic,
+ manager=CONF.conductor.manager)
+ service.serve(server)
+ service.wait()
diff --git a/bin/nova-console b/bin/nova-console
index c75e088c8..6a363c1e7 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -34,7 +34,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth
index 654a3f824..791ac2de5 100755
--- a/bin/nova-consoleauth
+++ b/bin/nova-consoleauth
@@ -31,9 +31,8 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
-from nova.consoleauth import manager
from nova import config
-from nova import flags
+from nova.consoleauth import manager
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index ed36c47bc..21cd5db00 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -35,10 +35,9 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
+from nova import config
from nova import context
from nova import db
-from nova import config
-from nova import flags
from nova.network import linux_net
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import importutils
diff --git a/bin/nova-manage b/bin/nova-manage
index 43ff5ebca..cca67398c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -78,7 +78,6 @@ from nova import context
from nova import db
from nova.db import migration
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -915,146 +914,6 @@ class InstanceTypeCommands(object):
_db_error(e)
-class StorageManagerCommands(object):
- """Class for mangaging Storage Backends and Flavors"""
-
- def flavor_list(self, flavor=None):
- ctxt = context.get_admin_context()
-
- try:
- if flavor is None:
- flavors = db.sm_flavor_get_all(ctxt)
- else:
- flavors = db.sm_flavor_get(ctxt, flavor)
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- print "%-18s\t%-20s\t%s" % (_('id'),
- _('Label'),
- _('Description'))
-
- for flav in flavors:
- print "%-18s\t%-20s\t%s" % (
- flav['id'],
- flav['label'],
- flav['description'])
-
- def flavor_create(self, label, desc):
- # TODO(renukaapte) flavor name must be unique
- try:
- db.sm_flavor_create(context.get_admin_context(),
- dict(label=label,
- description=desc))
- except exception.DBError, e:
- _db_error(e)
-
- def flavor_delete(self, label):
- try:
- db.sm_flavor_delete(context.get_admin_context(), label)
-
- except exception.DBError, e:
- _db_error(e)
-
- def _splitfun(self, item):
- i = item.split("=")
- return i[0:2]
-
- def backend_list(self, backend_conf_id=None):
- ctxt = context.get_admin_context()
-
- try:
- if backend_conf_id is None:
- backends = db.sm_backend_conf_get_all(ctxt)
- else:
- backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
-
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
- _('Flavor id'),
- _('SR UUID'),
- _('SR Type'),
- _('Config Parameters'),)
-
- for b in backends:
- print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
- b['flavor_id'],
- b['sr_uuid'],
- b['sr_type'],
- b['config_params'],)
-
- def backend_add(self, flavor_label, sr_type, *args):
- # TODO(renukaapte) Add backend_introduce.
- ctxt = context.get_admin_context()
- params = dict(map(self._splitfun, args))
-
- if 'sr_uuid' in params:
- try:
- backend = db.sm_backend_conf_get_by_sr(ctxt,
- params['sr_uuid'])
- except exception.DBError, e:
- _db_error(e)
-
- if backend:
- print _('Backend config found. Would you like to recreate '
- 'this?')
- print _('(WARNING:Recreating will destroy all VDIs on '
- 'backend!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c == 'y' or c == 'Y':
- try:
- db.sm_backend_conf_update(ctxt, backend['id'],
- dict(created=False))
- except exception.DBError, e:
- _db_error(e)
- return
-
- else:
- print _('Backend config not found. Would you like to create '
- 'it?')
- print _('(WARNING: Creating will destroy all data on '
- 'backend!!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c != 'y' and c != 'Y':
- return
-
- print _('(WARNING: Creating will destroy all data on backend!!!)')
- c = raw_input(_('Proceed? (y/n) '))
- if c == 'y' or c == 'Y':
- if flavor_label is None:
- print _('error: backend needs to be associated with flavor')
- sys.exit(2)
-
- try:
- flavors = db.sm_flavor_get_by_label(ctxt, flavor_label)
- except exception.NotFound as ex:
- print _('error: %s') % ex
- sys.exit(2)
-
- config_params = "".join(['%s=%s ' %
- (key, params[key]) for key in params])
-
- try:
- db.sm_backend_conf_create(ctxt,
- dict(flavor_id=flavors['id'],
- sr_uuid=None,
- sr_type=sr_type,
- config_params=config_params))
- except exception.DBError, e:
- _db_error(e)
-
- def backend_remove(self, backend_conf_id):
- try:
- db.sm_backend_conf_delete(context.get_admin_context(),
- backend_conf_id)
-
- except exception.DBError, e:
- _db_error(e)
-
-
class AgentBuildCommands(object):
"""Class for managing agent builds."""
@@ -1179,7 +1038,6 @@ CATEGORIES = [
('project', ProjectCommands),
('service', ServiceCommands),
('shell', ShellCommands),
- ('sm', StorageManagerCommands),
('version', VersionCommands),
('vm', VmCommands),
('vpn', VpnCommands),
diff --git a/bin/nova-network b/bin/nova-network
index def7782d7..fe90e4cce 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -35,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 1ba43aa01..ad901656d 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -31,7 +31,6 @@ import websockify
from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 9b9e2b7a7..8ec9fbf35 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -35,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.objectstore import s3server
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
index d6849ce9d..e9093f1a5 100755
--- a/bin/nova-rpc-zmq-receiver
+++ b/bin/nova-rpc-zmq-receiver
@@ -34,7 +34,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import impl_zmq
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 73dfab207..7730c5cb0 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -37,7 +37,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index b816bf2e9..7882645ad 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -32,7 +32,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import service
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
index 337bc26db..ad3bcab5d 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
@@ -1,7 +1,7 @@
{
"server": {
- "OS-EXT-SRV-ATTR:host": "f852c7db3f344eec955c369b8478fef7",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "1169a68456af48238da47b1d5957a714",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"accessIPv4": "",
"accessIPv6": "",
@@ -13,7 +13,7 @@
}
]
},
- "created": "2012-09-13T17:48:30Z",
+ "created": "2012-11-15T19:27:04Z",
"flavor": {
"id": "1",
"links": [
@@ -23,8 +23,8 @@
}
]
},
- "hostId": "4f846b99d954c7eb75dcbb25e1b92ccc5e77ba74f2bf22c2d8dd24d5",
- "id": "0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "hostId": "2dfce43c41dd288cfac3a5b4251742b3bd2b37c12eb5927e757d9b4c",
+ "id": "1fc2392e-5727-46af-bc21-317a4a3eb04c",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -36,11 +36,11 @@
},
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "href": "http://openstack.example.com/v2/openstack/servers/1fc2392e-5727-46af-bc21-317a4a3eb04c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/0b57a8d2-b1d0-43d8-b81b-9ef446281bfa",
+ "href": "http://openstack.example.com/openstack/servers/1fc2392e-5727-46af-bc21-317a4a3eb04c",
"rel": "bookmark"
}
],
@@ -51,7 +51,7 @@
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-13T17:48:30Z",
+ "updated": "2012-11-15T19:27:04Z",
"user_id": "fake"
}
} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
index e8d340a97..4b42c3586 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-09-14T11:34:17Z" hostId="fbaa82dd8c1948d351484640a7165d88a846902e1db2cc24dbaa23da" name="new-server-test" created="2012-09-14T11:34:17Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="ef63354b-dea8-4608-b209-0235ea9d4c47" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="90dd91c6b74e48ab8ee0e1aecefbd6e7" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+<server xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-11-15T19:27:06Z" hostId="6b8205d183f40afad106dbeac44d3872151ef6f36790077ea2ea85fc" name="new-server-test" created="2012-11-15T19:27:05Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="ece641c1-51f5-4190-9342-d9751f28eead" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="80edfa5af48b4894b20eb1d9d2d4424e" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -14,6 +14,6 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/ef63354b-dea8-4608-b209-0235ea9d4c47" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/ef63354b-dea8-4608-b209-0235ea9d4c47" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/ece641c1-51f5-4190-9342-d9751f28eead" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/ece641c1-51f5-4190-9342-d9751f28eead" rel="bookmark"/>
</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
index f92181a34..db3de77f4 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
@@ -1,8 +1,8 @@
{
"servers": [
{
- "OS-EXT-SRV-ATTR:host": "c90b8445a27f4057ac2457d4f511a617",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "dd99797793774612b081a8be19bf721a",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"accessIPv4": "",
"accessIPv6": "",
@@ -14,7 +14,7 @@
}
]
},
- "created": "2012-09-14T09:30:19Z",
+ "created": "2012-11-15T19:27:05Z",
"flavor": {
"id": "1",
"links": [
@@ -24,8 +24,8 @@
}
]
},
- "hostId": "5cb28923c8cc3f45fca3dd884249a8bf98f8a81900dd4b244d446cfd",
- "id": "f678aaae-1430-409d-8a48-efa08b885b25",
+ "hostId": "146245c049213a54b8c2352751518fcb4c2befd1b942b45a5a705d35",
+ "id": "e0c3563a-84ef-4d0b-bb80-23392cd23882",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -37,11 +37,11 @@
},
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/f678aaae-1430-409d-8a48-efa08b885b25",
+ "href": "http://openstack.example.com/v2/openstack/servers/e0c3563a-84ef-4d0b-bb80-23392cd23882",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/f678aaae-1430-409d-8a48-efa08b885b25",
+ "href": "http://openstack.example.com/openstack/servers/e0c3563a-84ef-4d0b-bb80-23392cd23882",
"rel": "bookmark"
}
],
@@ -52,7 +52,7 @@
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-14T09:30:19Z",
+ "updated": "2012-11-15T19:27:05Z",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
index 4fb7a2f82..8179a3bf9 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2012-09-14T11:34:18Z" hostId="6a0e019807cc227bcd091b89a45cc1c9eed430687ff2313e03ecfc84" name="new-server-test" created="2012-09-14T11:34:17Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="28f2c75f-61f3-44e5-b8c3-1725be74a831" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="d630e6a6e18b493bbe95c37df4144d58" OS-EXT-SRV-ATTR:hypervisor_hostname="None">
+ <server status="ACTIVE" updated="2012-11-15T19:27:06Z" hostId="b348a7376e2e61781829c9b45e63675aa0207632c25ce36c55a4fb2a" name="new-server-test" created="2012-11-15T19:27:06Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="2c4d049170fe409abc14942757d63a4e" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -15,7 +15,7 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/28f2c75f-61f3-44e5-b8c3-1725be74a831" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/28f2c75f-61f3-44e5-b8c3-1725be74a831" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/3cadb9e9-f430-4f62-8b9b-3efb671ff1fa" rel="bookmark"/>
</server>
</servers> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index f4dfa0683..155a877c2 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -297,6 +297,14 @@
"updated": "2012-01-23T00:00:00+00:00"
},
{
+ "alias": "os-services",
+ "description": "Services support",
+ "links": [],
+ "name": "Services",
+ "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
+ "updated": "2012-10-28T00:00:00-00:00"
+ },
+ {
"alias": "os-simple-tenant-usage",
"description": "Simple tenant usage extension",
"links": [],
@@ -337,4 +345,4 @@
"updated": "2011-03-25T00:00:00+00:00"
}
]
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index de18e5a2b..ad10c671b 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -125,6 +125,9 @@
<extension alias="os-server-start-stop" updated="2012-01-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
<description>Start/Stop instance compute API support</description>
</extension>
+ <extension alias="os-services" updated="2012-10-28T00:00:00-00:00" namespace="http://docs.openstack.org/compute/ext/services/api/v2" name="Services">
+ <description>Services support</description>
+ </extension>
<extension alias="os-simple-tenant-usage" updated="2011-08-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1" name="SimpleTenantUsage">
<description>Simple tenant usage extension</description>
</extension>
@@ -140,4 +143,4 @@
<extension alias="os-volumes" updated="2011-03-25T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
<description>Volumes support</description>
</extension>
-</extensions>
+</extensions> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/server-get-resp.json b/doc/api_samples/all_extensions/server-get-resp.json
index 3eb1870f9..a174bd779 100644
--- a/doc/api_samples/all_extensions/server-get-resp.json
+++ b/doc/api_samples/all_extensions/server-get-resp.json
@@ -1,8 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "3972bc79fa2a4754b7559153a5a48422",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "543330fc7412414094e79c867798cefd",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
@@ -18,7 +18,7 @@
]
},
"config_drive": "",
- "created": "2012-08-20T21:11:06Z",
+ "created": "2012-11-15T19:28:30Z",
"flavor": {
"id": "1",
"links": [
@@ -28,8 +28,8 @@
}
]
},
- "hostId": "dc23873c80c22f14705d190b645b59398cbc8ed3cdf6145468051c0d",
- "id": "7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "hostId": "edc4f072b6ca46a2d95c717401aa9835a204d3e4e6b148a7faba9ab0",
+ "id": "05c070bf-1c34-4d99-901c-0f97a7239b86",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -42,11 +42,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "href": "http://openstack.example.com/v2/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/7dc62bde-85f0-45b5-8c74-5fb209314807",
+ "href": "http://openstack.example.com/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
"rel": "bookmark"
}
],
@@ -62,7 +62,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-08-20T21:11:06Z",
+ "updated": "2012-11-15T19:28:31Z",
"user_id": "fake"
}
} \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/server-get-resp.xml b/doc/api_samples/all_extensions/server-get-resp.xml
index 6f2d8587f..8751a79ca 100644
--- a/doc/api_samples/all_extensions/server-get-resp.xml
+++ b/doc/api_samples/all_extensions/server-get-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-08-29T18:31:11Z" hostId="28cb24b3eafec0079eaca92bb439843ccdbe0cc2597b3ad9956f2113" name="new-server-test" created="2012-08-29T18:31:11Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="f3053932-a09d-446d-ba6e-4303b6725317" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="6f18ef4ea265447d8fe1b957b1e23ab4" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-11-15T19:28:35Z" hostId="71b7ec711488460249e7b30d505a59e474454e58d379dbddb3655fa3" name="new-server-test" created="2012-11-15T19:28:35Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="72ecf76b-789e-4bc9-9807-e8bb4afe4f74" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="748c8668e4324a82949d0e7f7e3003e2" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -14,8 +14,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/f3053932-a09d-446d-ba6e-4303b6725317" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/f3053932-a09d-446d-ba6e-4303b6725317" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/all_extensions/servers-details-resp.json b/doc/api_samples/all_extensions/servers-details-resp.json
index dc2698fec..dd0b8ab05 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.json
+++ b/doc/api_samples/all_extensions/servers-details-resp.json
@@ -2,8 +2,8 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "1ed961df12f5434a8e54e0efd1049fa5",
- "OS-EXT-SRV-ATTR:hypervisor_hostname": null,
+ "OS-EXT-SRV-ATTR:host": "f2df66e47d1f427cbd106cf9058360cc",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": null,
@@ -19,7 +19,7 @@
]
},
"config_drive": "",
- "created": "2012-09-12T17:16:15Z",
+ "created": "2012-11-15T19:28:29Z",
"flavor": {
"id": "1",
"links": [
@@ -29,8 +29,8 @@
}
]
},
- "hostId": "c161a6e3edcd047317a6cfbe599d7711850fd871210b62fdd2f6479e",
- "id": "1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "hostId": "7eedbc35a14388e24ec12917b1eb0bd5dc969619a0e367591d55d9ef",
+ "id": "7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -40,13 +40,14 @@
}
]
},
+ "key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "href": "http://openstack.example.com/v2/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/1d0f60bc-8c90-425d-8c64-cdaa49cd2303",
+ "href": "http://openstack.example.com/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
"rel": "bookmark"
}
],
@@ -62,7 +63,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-09-12T17:16:15Z",
+ "updated": "2012-11-15T19:28:30Z",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/all_extensions/servers-details-resp.xml b/doc/api_samples/all_extensions/servers-details-resp.xml
index c769c8ec1..bbef848d9 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.xml
+++ b/doc/api_samples/all_extensions/servers-details-resp.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2012-09-12T17:16:23Z" hostId="95bc84e46706592751395e8651c7d28cb0c86722ae372fe97fb27f35" name="new-server-test" created="2012-09-12T17:16:23Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="2e6a40cf-4abb-44ba-8a1c-fdb666110b42" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="530cfd748f4b4a24b3dc7015d5a0a02e" OS-EXT-SRV-ATTR:hypervisor_hostname="None" OS-DCF:diskConfig="AUTO">
+ <server status="ACTIVE" updated="2012-11-15T19:28:34Z" hostId="aa999c3533a60f2569f2fb142139cdc522b43f9d4153d80bc29bc6ad" name="new-server-test" created="2012-11-15T19:28:34Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="b82eb08d8d4042a99cdd2bfcc749e057" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -15,8 +15,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/2e6a40cf-4abb-44ba-8a1c-fdb666110b42" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/2e6a40cf-4abb-44ba-8a1c-fdb666110b42" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/limit-get-resp.json b/doc/api_samples/limit-get-resp.json
index 29b7b80b6..1c5aa7a74 100644
--- a/doc/api_samples/limit-get-resp.json
+++ b/doc/api_samples/limit-get-resp.json
@@ -10,8 +10,6 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"maxSecurityGroups": 10,
"maxSecurityGroupRules": 20
},
diff --git a/doc/api_samples/limit-get-resp.xml b/doc/api_samples/limit-get-resp.xml
index b3811a528..a47e0b22f 100644
--- a/doc/api_samples/limit-get-resp.xml
+++ b/doc/api_samples/limit-get-resp.xml
@@ -20,10 +20,8 @@
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
<limit name="maxTotalKeypairs" value="100"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="maxTotalCores" value="20"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
<limit name="maxTotalRAMSize" value="51200"/>
<limit name="maxSecurityGroups" value="10"/>
<limit name="maxSecurityGroupRules" value="20"/>
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-req.json b/doc/api_samples/os-consoles/get-vnc-console-post-req.json
new file mode 100644
index 000000000..1926119ce
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-req.json
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-req.xml b/doc/api_samples/os-consoles/get-vnc-console-post-req.xml
new file mode 100644
index 000000000..00f32c6b9
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getVNCConsole type="novnc" />
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-resp.json b/doc/api_samples/os-consoles/get-vnc-console-post-resp.json
new file mode 100644
index 000000000..4bcaf4686
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-resp.json
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url": "http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3&title=dafa(75ecef58-3b8e-4659-ab3b-5501454188e9)"
+ }
+}
diff --git a/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
new file mode 100644
index 000000000..2d74fffa8
--- /dev/null
+++ b/doc/api_samples/os-consoles/get-vnc-console-post-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>novnc</type>
+ <url>http://example.com:6080/vnc_auto.html?token=f9906a48-b71e-4f18-baca-c987da3ebdb3&amp;title=dafa(75ecef58-3b8e-4659-ab3b-5501454188e9)</url>
+</console>
diff --git a/doc/api_samples/os-consoles/server-post-req.json b/doc/api_samples/os-consoles/server-post-req.json
new file mode 100644
index 000000000..09366b4c9
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/os-consoles/server-post-req.xml b/doc/api_samples/os-consoles/server-post-req.xml
new file mode 100644
index 000000000..077dd7618
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/doc/api_samples/os-consoles/server-post-resp.json b/doc/api_samples/os-consoles/server-post-resp.json
new file mode 100644
index 000000000..db9ed3d6d
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "MVk5HPrazHcG",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/os-consoles/server-post-resp.xml b/doc/api_samples/os-consoles/server-post-resp.xml
new file mode 100644
index 000000000..68f0933c7
--- /dev/null
+++ b/doc/api_samples/os-consoles/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" adminPass="MVk5HPrazHcG">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="bookmark"/>
+</server>
diff --git a/doc/api_samples/os-server-start-stop/server-post-req.json b/doc/api_samples/os-server-start-stop/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-req.xml b/doc/api_samples/os-server-start-stop/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-resp.json b/doc/api_samples/os-server-start-stop/server-post-resp.json
new file mode 100644
index 000000000..09d9fb612
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "xrDLoBeMD28B",
+ "id": "3f69b6bd-00a8-4636-96ee-650093624304",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server-post-resp.xml b/doc/api_samples/os-server-start-stop/server-post-resp.xml
new file mode 100644
index 000000000..7f84ac03d
--- /dev/null
+++ b/doc/api_samples/os-server-start-stop/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" adminPass="uF9wWxBh3mWL">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-server-start-stop/server_start_stop.xml b/doc/api_samples/os-server-start-stop/server_start_stop.xml
index 69aac4af4..974e415a5 100644
--- a/doc/api_samples/os-server-start-stop/server_start_stop.xml
+++ b/doc/api_samples/os-server-start-stop/server_start_stop.xml
@@ -1,2 +1,2 @@
<?xml version="1.0" encoding="UTF-8"?>
-<stop/>
+<os-stop/> \ No newline at end of file
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.json b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
index e24a1b8bb..f018be9be 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.json
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.json
@@ -12,15 +12,11 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
- "totalSecurityGroupsUsed": 0,
- "totalVolumeGigabytesUsed": 0,
- "totalVolumesUsed": 0
+ "totalSecurityGroupsUsed": 0
},
"rate": [
{
@@ -78,4 +74,4 @@
}
]
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
index ae14c8158..b86a41555 100644
--- a/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
+++ b/doc/api_samples/os-used-limits/usedlimits-get-resp.xml
@@ -19,20 +19,16 @@
<limit name="maxPersonality" value="5"/>
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
- <limit name="totalVolumesUsed" value="0"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
<limit name="totalCoresUsed" value="0"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="totalRAMUsed" value="0"/>
<limit name="totalInstancesUsed" value="0"/>
<limit name="maxSecurityGroups" value="10"/>
- <limit name="totalVolumeGigabytesUsed" value="0"/>
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
<limit name="totalKeyPairsUsed" value="0"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
-</limits> \ No newline at end of file
+</limits>
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b52bcad0d..804080e79 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -154,7 +154,9 @@ man_pages = [
('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
[u'OpenStack'], 1),
('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
- [u'OpenStack'], 1)
+ [u'OpenStack'], 1),
+ ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
+ [u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
diff --git a/doc/source/man/nova-conductor.rst b/doc/source/man/nova-conductor.rst
new file mode 100644
index 000000000..7a32730e1
--- /dev/null
+++ b/doc/source/man/nova-conductor.rst
@@ -0,0 +1,45 @@
+==============
+nova-conductor
+==============
+
+--------------------------------
+Server for the Nova Conductor
+--------------------------------
+
+:Author: openstack@lists.launchpad.net
+:Date: 2012-11-16
+:Copyright: OpenStack LLC
+:Version: 2012.1
+:Manual section: 1
+:Manual group: cloud computing
+
+SYNOPSIS
+========
+
+ nova-conductor [options]
+
+DESCRIPTION
+===========
+
+nova-conductor is a server daemon that serves the Nova Conductor service, which provides coordination and database query support for Nova.
+
+OPTIONS
+=======
+
+ **General options**
+
+FILES
+========
+
+* /etc/nova/nova.conf
+
+SEE ALSO
+========
+
+* `OpenStack Nova <http://nova.openstack.org>`__
+* `Nova on Launchpad <https://launchpad.net/nova>`__
+
+BUGS
+====
+
+* Nova is sourced in Launchpad so you can view current bugs at `OpenStack Nova <http://nova.openstack.org>`__
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 3970974c0..85603fe59 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -117,7 +117,7 @@ paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 2580fafc9..f44b034a0 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -89,9 +89,6 @@
# state_path=$pybasedir
#### (StrOpt) Top-level directory for maintaining nova's state
-# lock_path=$pybasedir
-#### (StrOpt) Directory to use for lock files
-
# fake_network=false
#### (BoolOpt) If passed, use fake network devices and addresses
@@ -150,16 +147,13 @@
# scheduler_topic=scheduler
#### (StrOpt) the topic scheduler nodes listen on
-# volume_topic=volume
-#### (StrOpt) the topic volume nodes listen on
-
# network_topic=network
#### (StrOpt) the topic network nodes listen on
# api_rate_limit=true
#### (BoolOpt) whether to rate limit the api
-# enabled_apis=ec2,osapi_compute,osapi_volume,metadata
+# enabled_apis=ec2,osapi_compute,metadata
#### (ListOpt) a list of APIs to enable by default
# ec2_host=$my_ip
@@ -186,14 +180,6 @@
# osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions
#### (MultiStrOpt) osapi compute extension to load
-# osapi_volume_ext_list=
-#### (ListOpt) Specify list of extensions to load when using
-#### osapi_volume_extension option with
-#### nova.api.openstack.volume.contrib.select_extensions
-
-# osapi_volume_extension=nova.api.openstack.volume.contrib.standard_extensions
-#### (MultiStrOpt) osapi volume extension to load
-
# osapi_path=/v1.1/
#### (StrOpt) the path prefix used to call the openstack api server
@@ -268,9 +254,6 @@
# network_manager=nova.network.manager.VlanManager
#### (StrOpt) full class name for the Manager for network
-# volume_manager=nova.volume.manager.VolumeManager
-#### (StrOpt) full class name for the Manager for volume
-
# scheduler_manager=nova.scheduler.manager.SchedulerManager
#### (StrOpt) full class name for the Manager for scheduler
@@ -304,10 +287,7 @@
#### (StrOpt) The default format an ephemeral_volume will be formatted
#### with on creation.
-# root_helper=sudo
-#### (StrOpt) Deprecated: command to use for running commands as root
-
-# rootwrap_config=<None>
+# rootwrap_config=/etc/nova/rootwrap.conf
#### (StrOpt) Path to the rootwrap configuration file to use for running
#### commands as root
@@ -365,7 +345,7 @@
# network_api_class=nova.network.api.API
#### (StrOpt) The full class name of the network API class to use
-# volume_api_class=nova.volume.api.API
+# volume_api_class=nova.volume.cinder.API
#### (StrOpt) The full class name of the volume API class to use
# security_group_handler=nova.network.sg.NullSecurityGroupHandler
@@ -377,7 +357,7 @@
# auth_strategy=noauth
#### (StrOpt) The strategy to use for auth: noauth or keystone.
-# non_inheritable_image_properties=cache_in_nova,instance_uuid,user_id,image_type,backup_type,min_ram,min_disk
+# non_inheritable_image_properties=cache_in_nova,bittorrent
#### (ListOpt) These are image properties which a snapshot should not
#### inherit from an instance
@@ -385,6 +365,12 @@
#### (BoolOpt) Whether to batch up the application of IPTables rules during
#### a host restart and apply all at the end of the init phase
+# compute_driver=<None>
+#### (StrOpt) Driver to use for controlling virtualization. Options
+#### include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
+#### fake.FakeDriver, baremetal.BareMetalDriver,
+#### vmwareapi.VMWareESXDriver
+
######## defined in nova.notifications ########
@@ -394,6 +380,10 @@
#### notifications, True for notifications on any instance
#### changes.
+# notify_api_faults=false
+#### (BoolOpt) If set, send api.fault notifications on caught exceptions in
+#### the API service.
+
# notify_on_state_change=<None>
#### (StrOpt) If set, send compute.instance.update notifications on
#### instance state changes. Valid values are None for no
@@ -422,12 +412,6 @@
# quota_ram=51200
#### (IntOpt) megabytes of instance ram allowed per project
-# quota_volumes=10
-#### (IntOpt) number of volumes allowed per project
-
-# quota_gigabytes=1000
-#### (IntOpt) number of volume gigabytes allowed per project
-
# quota_floating_ips=10
#### (IntOpt) number of floating ips allowed per project
@@ -508,15 +492,6 @@
# metadata_workers=<None>
#### (IntOpt) Number of workers for metadata service
-# osapi_volume_listen=0.0.0.0
-#### (StrOpt) IP address for OpenStack Volume API to listen
-
-# osapi_volume_listen_port=8776
-#### (IntOpt) port for os volume api to listen
-
-# osapi_volume_workers=<None>
-#### (IntOpt) Number of workers for OpenStack Volume API service
-
######## defined in nova.test ########
@@ -527,6 +502,15 @@
#### (BoolOpt) should we use everything for testing
+######## defined in nova.wsgi ########
+
+# wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+#### (StrOpt) A python format string that is used as the template to
+#### generate log lines. The following values can be formatted
+#### into it: client_ip, date_time, request_line, status_code,
+#### body_length, wall_seconds.
+
+
######## defined in nova.api.auth ########
# use_forwarded_for=false
@@ -555,6 +539,9 @@
# ec2_strict_validation=true
#### (BoolOpt) Validate security group names according to EC2 specification
+# ec2_timestamp_expiry=300
+#### (IntOpt) Time in seconds before ec2 timestamp expires
+
######## defined in nova.api.metadata.base ########
@@ -590,12 +577,6 @@
#### (StrOpt) Netmask to push into openvpn config
-######## defined in nova.common.deprecated ########
-
-# fatal_deprecations=false
-#### (BoolOpt) make deprecations fatal
-
-
######## defined in nova.common.eventlet_backdoor ########
# backdoor_port=<None>
@@ -612,12 +593,6 @@
#### NOT the full path - just a folder name.For per-compute-host
#### cached images, set to _base_$my_ip
-# compute_driver=nova.virt.connection.get_connection
-#### (StrOpt) Driver to use for controlling virtualization. Options
-#### include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
-#### fake.FakeDriver, baremetal.BareMetalDriver,
-#### vmwareapi.VMWareESXDriver
-
# console_host=nova
#### (StrOpt) Console proxy host to use to connect to instances on this
#### host.
@@ -678,9 +653,6 @@
# reserved_host_memory_mb=512
#### (IntOpt) Amount of memory in MB to reserve for the host
-# claim_timeout_seconds=600
-#### (IntOpt) How long, in seconds, before a resource claim times out
-
# compute_stats_class=nova.compute.stats.Stats
#### (StrOpt) Class that will manage stats for the local compute host
@@ -750,9 +722,6 @@
# instance_name_template=instance-%08x
#### (StrOpt) Template string to be used to generate instance names
-# volume_name_template=volume-%s
-#### (StrOpt) Template string to be used to generate instance names
-
# snapshot_name_template=snapshot-%s
#### (StrOpt) Template string to be used to generate snapshot names
@@ -991,6 +960,15 @@
#### (IntOpt) port for s3 api to listen
+######## defined in nova.openstack.common.lockutils ########
+
+# disable_process_locking=false
+#### (BoolOpt) Whether to disable inter-process locks
+
+# lock_path=/usr/lib/python/site-packages/nova/openstack
+#### (StrOpt) Directory to use for lock files
+
+
######## defined in nova.openstack.common.log ########
# logdir=<None>
@@ -1008,13 +986,13 @@
# logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
#### (StrOpt) format string to use for log messages with context
-# logging_default_format_string=%(asctime)s %(levelname)s %(name)s [-] %(instance)s%(message)s
+# logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
#### (StrOpt) format string to use for log messages without context
-# logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d
+# logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
#### (StrOpt) data to append to log format when level is DEBUG
-# logging_exception_prefix=%(asctime)s TRACE %(name)s %(instance)s
+# logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
#### (StrOpt) prefix each line of exception output with this format
# default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
@@ -1023,6 +1001,9 @@
# publish_errors=false
#### (BoolOpt) publish error events
+# fatal_deprecations=false
+#### (BoolOpt) make deprecations fatal
+
# instance_format="[instance: %(uuid)s] "
#### (StrOpt) If an instance is passed with the log message, format it
#### like this
@@ -1067,13 +1048,10 @@
#### (IntOpt) Seconds to wait before a cast expires (TTL). Only supported
#### by impl_zmq.
-# allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception
+# allowed_rpc_exception_modules=nova.openstack.common.exception,nova.exception,cinder.exception
#### (ListOpt) Modules of exceptions that are permitted to be recreatedupon
#### receiving exception data from an rpc call.
-# control_exchange=nova
-#### (StrOpt) AMQP exchange to connect to if using RabbitMQ or Qpid
-
# fake_rabbit=false
#### (BoolOpt) If passed, use a fake RabbitMQ provider
@@ -1093,10 +1071,13 @@
#### (StrOpt) SSL certification authority file (valid only if SSL enabled)
# rabbit_host=localhost
-#### (StrOpt) the RabbitMQ host
+#### (StrOpt) The RabbitMQ broker address where a single node is used
# rabbit_port=5672
-#### (IntOpt) the RabbitMQ port
+#### (IntOpt) The RabbitMQ broker port where a single node is used
+
+# rabbit_hosts=$rabbit_host:$rabbit_port
+#### (ListOpt) RabbitMQ HA cluster host:port pairs
# rabbit_use_ssl=false
#### (BoolOpt) connect over SSL for RabbitMQ
@@ -1124,6 +1105,10 @@
# rabbit_durable_queues=false
#### (BoolOpt) use durable queues in RabbitMQ
+# rabbit_ha_queues=false
+#### (BoolOpt) use H/A queues in RabbitMQ (x-ha-policy: all). You need to
+#### wipe RabbitMQ database when changing this option.
+
######## defined in nova.openstack.common.rpc.impl_qpid ########
@@ -1221,6 +1206,19 @@
#### (FloatOpt) virtual disk to physical disk allocation ratio
+######## defined in nova.scheduler.filters.io_ops_filter ########
+
+# max_io_ops_per_host=8
+#### (IntOpt) Ignore hosts that have too many
+#### builds/resizes/snaps/migrations
+
+
+######## defined in nova.scheduler.filters.num_instances_filter ########
+
+# max_instances_per_host=50
+#### (IntOpt) Ignore hosts that have too many instances
+
+
######## defined in nova.scheduler.filters.ram_filter ########
# ram_allocation_ratio=1.5
@@ -1229,19 +1227,19 @@
######## defined in nova.scheduler.filters.trusted_filter ########
-# server=<None>
+# attestation_server=<None>
#### (StrOpt) attestation server http
-# server_ca_file=<None>
+# attestation_server_ca_file=<None>
#### (StrOpt) attestation server Cert file for Identity verification
-# port=8443
+# attestation_port=8443
#### (StrOpt) attestation server port
-# api_url=/OpenAttestationWebServices/V1.0
+# attestation_api_url=/OpenAttestationWebServices/V1.0
#### (StrOpt) attestation web API URL
-# auth_blob=<None>
+# attestation_auth_blob=<None>
#### (StrOpt) attestation authorization blob - must change
@@ -1273,7 +1271,7 @@
######## defined in nova.scheduler.manager ########
-# scheduler_driver=nova.scheduler.multi.MultiScheduler
+# scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
#### (StrOpt) Default driver to use for the scheduler
@@ -1282,9 +1280,6 @@
# compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler
#### (StrOpt) Driver to use for scheduling compute calls
-# volume_scheduler_driver=nova.scheduler.chance.ChanceScheduler
-#### (StrOpt) Driver to use for scheduling volume calls
-
# default_scheduler_driver=nova.scheduler.chance.ChanceScheduler
#### (StrOpt) Default driver to use for scheduling calls
@@ -1295,12 +1290,6 @@
#### (StrOpt) Absolute path to scheduler configuration JSON file.
-######## defined in nova.scheduler.simple ########
-
-# max_gigabytes=10000
-#### (IntOpt) maximum number of volume gigabytes to allow per host
-
-
######## defined in nova.virt.baremetal.driver ########
# baremetal_type=baremetal
@@ -1328,6 +1317,10 @@
#### (StrOpt) Where to put temporary files associated with config drive
#### creation
+# force_config_drive=<None>
+#### (StrOpt) Set to force injection to take place on a config drive (if
+#### set, valid options are: always)
+
######## defined in nova.virt.disk.api ########
@@ -1458,10 +1451,6 @@
#### (BoolOpt) Use a separated OS thread pool to realize non-blocking
#### libvirt calls
-# force_config_drive=<None>
-#### (StrOpt) Set to force injection to take place on a config drive (if
-#### set, valid options are: always)
-
# libvirt_cpu_mode=<None>
#### (StrOpt) Set to "host-model" to clone the host CPU feature flags; to
#### "host-passthrough" to use the host CPU model exactly; to
@@ -1528,6 +1517,18 @@
#### (BoolOpt) Use virtio for bridge interfaces
+######## defined in nova.virt.libvirt.volume ########
+
+# num_iscsi_scan_tries=3
+#### (IntOpt) number of times to rescan iSCSI target to find volume
+
+# rbd_user=<None>
+#### (StrOpt) the RADOS client name for accessing rbd volumes
+
+# rbd_secret_uuid=<None>
+#### (StrOpt) the libvirt uuid of the secret for the rbd_user volumes
+
+
######## defined in nova.virt.libvirt.volume_nfs ########
# nfs_mount_point_base=$state_path/mnt
@@ -1598,9 +1599,27 @@
######## defined in nova.virt.xenapi.agent ########
+# agent_timeout=30
+#### (IntOpt) number of seconds to wait for agent reply
+
# agent_version_timeout=300
#### (IntOpt) number of seconds to wait for agent to be fully operational
+# agent_resetnetwork_timeout=60
+#### (IntOpt) number of seconds to wait for agent reply to resetnetwork
+#### request
+
+# xenapi_agent_path=usr/sbin/xe-update-networking
+#### (StrOpt) Specifies the path in which the xenapi guest agent should be
+#### located. If the agent is present, network configuration is
+#### not injected into the image. Used if
+#### compute_driver=xenapi.XenAPIDriver and flat_injected=True
+
+# xenapi_disable_agent=false
+#### (BoolOpt) Disable XenAPI agent. Reduces the amount of time it takes
+#### nova to detect that a VM has started, when that VM does not
+#### have the agent installed
+
######## defined in nova.virt.xenapi.driver ########
@@ -1632,12 +1651,6 @@
#### (IntOpt) Max number of times to poll for VHD to coalesce. Used only
#### if compute_driver=xenapi.XenAPIDriver
-# xenapi_agent_path=usr/sbin/xe-update-networking
-#### (StrOpt) Specifies the path in which the xenapi guest agent should be
-#### located. If the agent is present, network configuration is
-#### not injected into the image. Used if
-#### compute_driver=xenapi.XenAPIDriver and flat_injected=True
-
# xenapi_sr_base_path=/var/run/sr-mount
#### (StrOpt) Base path to the storage repository
@@ -1703,6 +1716,38 @@
# xenapi_num_vbd_unplug_retries=10
#### (IntOpt) Maximum number of retries to unplug VBD
+# xenapi_torrent_images=none
+#### (StrOpt) Whether or not to download images via Bit Torrent
+#### (all|some|none).
+
+# xenapi_torrent_base_url=<None>
+#### (StrOpt) Base URL for torrent files.
+
+# xenapi_torrent_seed_chance=1.0
+#### (FloatOpt) Probability that peer will become a seeder. (1.0 = 100%)
+
+# xenapi_torrent_seed_duration=3600
+#### (IntOpt) Number of seconds after downloading an image via BitTorrent
+#### that it should be seeded for other peers.
+
+# xenapi_torrent_max_last_accessed=86400
+#### (IntOpt) Cached torrent files not accessed within this number of
+#### seconds can be reaped
+
+# xenapi_torrent_listen_port_start=6881
+#### (IntOpt) Beginning of port range to listen on
+
+# xenapi_torrent_listen_port_end=6891
+#### (IntOpt) End of port range to listen on
+
+# xenapi_torrent_download_stall_cutoff=600
+#### (IntOpt) Number of seconds a download can remain at the same progress
+#### percentage w/o being considered a stall
+
+# xenapi_torrent_max_seeder_processes_per_host=1
+#### (IntOpt) Maximum number of seeder processes to run concurrently
+#### within a given dom0. (-1 = no limit)
+
######## defined in nova.virt.xenapi.vmops ########
@@ -1713,9 +1758,6 @@
# xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
#### (StrOpt) The XenAPI VIF driver using XenServer Network APIs.
-# xenapi_generate_swap=false
-#### (BoolOpt) Whether to generate swap (False means fetching it from OVA)
-
######## defined in nova.vnc ########
@@ -1750,13 +1792,6 @@
#### (StrOpt) Address that the XCP VNC proxy should bind to
-######## defined in nova.volume.api ########
-
-# snapshot_same_host=true
-#### (BoolOpt) Create volume from snapshot at the host where snapshot
-#### resides
-
-
######## defined in nova.volume.cinder ########
# cinder_catalog_info=volume:cinder:publicURL
@@ -1769,254 +1804,4 @@
#### endpoint e.g. http://localhost:8776/v1/%(project_id)s
-######## defined in nova.volume.driver ########
-
-# volume_group=nova-volumes
-#### (StrOpt) Name for the VG that will contain exported volumes
-
-# num_shell_tries=3
-#### (IntOpt) number of times to attempt to run flakey shell commands
-
-# num_iscsi_scan_tries=3
-#### (IntOpt) number of times to rescan iSCSI target to find volume
-
-# iscsi_num_targets=100
-#### (IntOpt) Number of iscsi target ids per host
-
-# iscsi_target_prefix=iqn.2010-10.org.openstack:
-#### (StrOpt) prefix for iscsi volumes
-
-# iscsi_ip_address=$my_ip
-#### (StrOpt) use this ip for iscsi
-
-# iscsi_port=3260
-#### (IntOpt) The port that the iSCSI daemon is listening on
-
-# rbd_pool=rbd
-#### (StrOpt) the RADOS pool in which rbd volumes are stored
-
-# rbd_user=<None>
-#### (StrOpt) the RADOS client name for accessing rbd volumes
-
-# rbd_secret_uuid=<None>
-#### (StrOpt) the libvirt uuid of the secret for the rbd_uservolumes
-
-# volume_tmp_dir=<None>
-#### (StrOpt) where to store temporary image files if the volume driver
-#### does not write them directly to the volume
-
-
-######## defined in nova.volume.iscsi ########
-
-# iscsi_helper=tgtadm
-#### (StrOpt) iscsi target user-land tool to use
-
-# volumes_dir=$state_path/volumes
-#### (StrOpt) Volume configuration file storage directory
-
-
-######## defined in nova.volume.manager ########
-
-# storage_availability_zone=nova
-#### (StrOpt) availability zone of this service
-
-# volume_driver=nova.volume.driver.ISCSIDriver
-#### (StrOpt) Driver to use for volume creation
-
-# use_local_volumes=true
-#### (BoolOpt) if True, will not discover local volumes
-
-# volume_force_update_capabilities=false
-#### (BoolOpt) if True will force update capabilities on each check
-
-
-######## defined in nova.volume.netapp ########
-
-# netapp_wsdl_url=<None>
-#### (StrOpt) URL of the WSDL file for the DFM server
-
-# netapp_login=<None>
-#### (StrOpt) User name for the DFM server
-
-# netapp_password=<None>
-#### (StrOpt) Password for the DFM server
-
-# netapp_server_hostname=<None>
-#### (StrOpt) Hostname for the DFM server
-
-# netapp_server_port=8088
-#### (IntOpt) Port number for the DFM server
-
-# netapp_storage_service=<None>
-#### (StrOpt) Storage service to use for provisioning (when
-#### volume_type=None)
-
-# netapp_storage_service_prefix=<None>
-#### (StrOpt) Prefix of storage service name to use for provisioning
-#### (volume_type name will be appended)
-
-# netapp_vfiler=<None>
-#### (StrOpt) Vfiler to use for provisioning
-
-
-######## defined in nova.volume.netapp_nfs ########
-
-# synchronous_snapshot_create=0
-#### (IntOpt) Does snapshot creation call returns immediately
-
-# netapp_wsdl_url=<None>
-#### (StrOpt) URL of the WSDL file for the DFM server
-
-# netapp_login=<None>
-#### (StrOpt) User name for the DFM server
-
-# netapp_password=<None>
-#### (StrOpt) Password for the DFM server
-
-# netapp_server_hostname=<None>
-#### (StrOpt) Hostname for the DFM server
-
-# netapp_server_port=8088
-#### (IntOpt) Port number for the DFM server
-
-# netapp_storage_service=<None>
-#### (StrOpt) Storage service to use for provisioning (when
-#### volume_type=None)
-
-# netapp_storage_service_prefix=<None>
-#### (StrOpt) Prefix of storage service name to use for provisioning
-#### (volume_type name will be appended)
-
-# netapp_vfiler=<None>
-#### (StrOpt) Vfiler to use for provisioning
-
-
-######## defined in nova.volume.nexenta.volume ########
-
-# nexenta_host=
-#### (StrOpt) IP address of Nexenta SA
-
-# nexenta_rest_port=2000
-#### (IntOpt) HTTP port to connect to Nexenta REST API server
-
-# nexenta_rest_protocol=auto
-#### (StrOpt) Use http or https for REST connection (default auto)
-
-# nexenta_user=admin
-#### (StrOpt) User name to connect to Nexenta SA
-
-# nexenta_password=nexenta
-#### (StrOpt) Password to connect to Nexenta SA
-
-# nexenta_iscsi_target_portal_port=3260
-#### (IntOpt) Nexenta target portal port
-
-# nexenta_volume=nova
-#### (StrOpt) pool on SA that will hold all volumes
-
-# nexenta_target_prefix=iqn.1986-03.com.sun:02:nova-
-#### (StrOpt) IQN prefix for iSCSI targets
-
-# nexenta_target_group_prefix=nova/
-#### (StrOpt) prefix for iSCSI target groups on SA
-
-# nexenta_blocksize=
-#### (StrOpt) block size for volumes (blank=default,8KB)
-
-# nexenta_sparse=false
-#### (BoolOpt) flag to create sparse volumes
-
-
-######## defined in nova.volume.nfs ########
-
-# nfs_shares_config=<None>
-#### (StrOpt) File with the list of available nfs shares
-
-# nfs_disk_util=df
-#### (StrOpt) Use du or df for free space calculation
-
-# nfs_sparsed_volumes=true
-#### (BoolOpt) Create volumes as sparsed files which take no space.If set
-#### to False volume is created as regular file.In such case
-#### volume creation takes a lot of time.
-
-
-######## defined in nova.volume.san ########
-
-# san_thin_provision=true
-#### (BoolOpt) Use thin provisioning for SAN volumes?
-
-# san_ip=
-#### (StrOpt) IP address of SAN controller
-
-# san_login=admin
-#### (StrOpt) Username for SAN controller
-
-# san_password=
-#### (StrOpt) Password for SAN controller
-
-# san_private_key=
-#### (StrOpt) Filename of private key to use for SSH authentication
-
-# san_clustername=
-#### (StrOpt) Cluster name to use for creating volumes
-
-# san_ssh_port=22
-#### (IntOpt) SSH port to use with SAN
-
-# san_is_local=false
-#### (BoolOpt) Execute commands locally instead of over SSH; use if the
-#### volume service is running on the SAN device
-
-# san_zfs_volume_base=rpool/
-#### (StrOpt) The ZFS path under which to create zvols for volumes.
-
-
-######## defined in nova.volume.solidfire ########
-
-# sf_emulate_512=true
-#### (BoolOpt) Set 512 byte emulation on volume creation;
-
-# sf_mvip=
-#### (StrOpt) IP address of SolidFire MVIP
-
-# sf_login=admin
-#### (StrOpt) Username for SF Cluster Admin
-
-# sf_password=
-#### (StrOpt) Password for SF Cluster Admin
-
-# sf_allow_tenant_qos=true
-#### (BoolOpt) Allow tenants to specify QOS on create
-
-
-######## defined in nova.volume.storwize_svc ########
-
-# storwize_svc_volpool_name=volpool
-#### (StrOpt) Storage system storage pool for volumes
-
-# storwize_svc_vol_rsize=2%
-#### (StrOpt) Storage system space-efficiency parameter for volumes
-
-# storwize_svc_vol_warning=0
-#### (StrOpt) Storage system threshold for volume capacity warnings
-
-# storwize_svc_vol_autoexpand=true
-#### (BoolOpt) Storage system autoexpand parameter for volumes (True/False)
-
-# storwize_svc_vol_grainsize=256
-#### (StrOpt) Storage system grain size parameter for volumes
-#### (32/64/128/256)
-
-# storwize_svc_vol_compression=false
-#### (BoolOpt) Storage system compression option for volumes
-
-# storwize_svc_vol_easytier=true
-#### (BoolOpt) Enable Easy Tier for volumes
-
-# storwize_svc_flashcopy_timeout=120
-#### (StrOpt) Maximum number of seconds to wait for FlashCopy to be
-#### prepared. Maximum value is 600 seconds (10 minutes).
-
-
-# Total option count: 527
+# Total option count: 462
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index bd015802a..fa21b71e9 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -36,6 +36,7 @@
"compute_extension:disk_config": "",
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:extended_status": "",
+ "compute_extension:fixed_ips": "rule:admin_api",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
@@ -46,6 +47,8 @@
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "rule:admin_api",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
@@ -59,6 +62,7 @@
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "rule:admin_api",
+ "compute_extension:services": "rule:admin_api",
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
"compute_extension:users": "rule:admin_api",
diff --git a/nova/api/auth.py b/nova/api/auth.py
index 1562aeede..167c897cf 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -23,7 +23,6 @@ import webob.exc
from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 1bf1f9f70..a4a52a22a 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -34,7 +34,6 @@ from nova.api import validator
from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 6cd7c4431..c3b2065b4 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -25,8 +25,8 @@ import datetime
from xml.dom import minidom
from nova.api.ec2 import ec2utils
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 8a7471951..a061931b0 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -36,7 +36,6 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.image import s3
from nova import network
from nova.openstack.common import log as logging
@@ -696,8 +695,8 @@ class CloudController(object):
else:
ec2_id = instance_id
validate_ec2_id(ec2_id)
- instance_id = ec2utils.ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
output = self.compute_api.get_console_output(context, instance)
now = timeutils.utcnow()
return {"InstanceId": ec2_id,
@@ -807,8 +806,8 @@ class CloudController(object):
validate_ec2_id(instance_id)
validate_ec2_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- instance = self.compute_api.get(context, instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
+ instance = self.compute_api.get(context, instance_uuid)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
@@ -822,7 +821,7 @@ class CloudController(object):
volume = self.volume_api.get(context, volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
- 'instanceId': ec2utils.id_to_ec2_inst_id(instance_id),
+ 'instanceId': ec2utils.id_to_ec2_inst_id(instance_uuid),
'requestId': context.request_id,
'status': volume['attach_status'],
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
@@ -921,11 +920,10 @@ class CloudController(object):
raise exception.EC2APIError(
_('attribute not supported: %s') % attribute)
- ec2_instance_id = instance_id
validate_ec2_id(instance_id)
- instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
- instance = self.compute_api.get(context, instance_id)
- result = {'instance_id': ec2_instance_id}
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ result = {'instance_id': instance_id}
fn(instance, result)
return result
@@ -958,8 +956,8 @@ class CloudController(object):
i['previousState'] = _state_description(previous_state['vm_state'],
previous_state['shutdown_terminate'])
try:
- internal_id = ec2utils.ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, internal_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
i['currentState'] = _state_description(instance['vm_state'],
instance['shutdown_terminate'])
except exception.NotFound:
@@ -1154,8 +1152,8 @@ class CloudController(object):
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
- instance_id = ec2utils.ec2_id_to_id(instance_id)
- instance = self.compute_api.get(context, instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
+ instance = self.compute_api.get(context, instance_uuid)
cached_ipinfo = ec2utils.get_ip_info_for_instance(context, instance)
fixed_ips = cached_ipinfo['fixed_ips'] + cached_ipinfo['fixed_ip6s']
@@ -1249,8 +1247,8 @@ class CloudController(object):
instances = []
for ec2_id in instance_id:
validate_ec2_id(ec2_id)
- _instance_id = ec2utils.ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, _instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
instances.append(instance)
return instances
@@ -1513,8 +1511,8 @@ class CloudController(object):
name = kwargs.get('name')
validate_ec2_id(instance_id)
ec2_instance_id = instance_id
- instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
- instance = self.compute_api.get(context, instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
+ instance = self.compute_api.get(context, instance_uuid)
bdms = self.compute_api.get_instance_bdms(context, instance)
@@ -1544,7 +1542,7 @@ class CloudController(object):
start_time = time.time()
while vm_state != vm_states.STOPPED:
time.sleep(1)
- instance = self.compute_api.get(context, instance_id)
+ instance = self.compute_api.get(context, instance_uuid)
vm_state = instance['vm_state']
# NOTE(yamahata): timeout and error. 1 hour for now for safety.
# Is it too short/long?
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index de05aa903..238bbecb9 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -18,10 +18,10 @@
import re
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -252,10 +252,6 @@ def get_snapshot_uuid_from_int_id(context, int_id):
return db.get_snapshot_uuid_by_ec2_id(context, int_id)
-def ec2_instance_id_to_uuid(context, ec2_id):
- int_id = ec2_id_to_id(ec2_id)
- return db.instance_get(context, int_id)['uuid']
-
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py
index 331603a3a..fd2e46fcd 100644
--- a/nova/api/ec2/faults.py
+++ b/nova/api/ec2/faults.py
@@ -17,7 +17,6 @@ import webob.exc
from nova import config
from nova import context
-from nova import flags
from nova import utils
CONF = config.CONF
diff --git a/nova/api/manager.py b/nova/api/manager.py
index dc081d9a6..d74e0ad24 100644
--- a/nova/api/manager.py
+++ b/nova/api/manager.py
@@ -17,7 +17,6 @@
# under the License.
from nova import config
-from nova import flags
from nova import manager
from nova.openstack.common import importutils
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 21fb4a7da..937938b40 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -21,13 +21,13 @@
import base64
import json
import os
+import posixpath
from nova.api.ec2 import ec2utils
from nova import block_device
from nova import config
from nova import context
from nova import db
-from nova import flags
from nova import network
from nova.openstack.common import cfg
from nova.virt import netutils
@@ -110,7 +110,7 @@ class InstanceMetadata():
self.ec2_ids = {}
self.ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(
- instance['id'])
+ instance['uuid'])
self.ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(ctxt,
instance['image_ref'])
@@ -314,9 +314,9 @@ class InstanceMetadata():
def lookup(self, path):
if path == "" or path[0] != "/":
- path = os.path.normpath("/" + path)
+ path = posixpath.normpath("/" + path)
else:
- path = os.path.normpath(path)
+ path = posixpath.normpath(path)
# fix up requests, prepending /ec2 to anything that does not match
path_tokens = path.split('/')[1:]
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 14ec696cd..434d8deba 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -25,7 +25,6 @@ import webob.exc
from nova.api.metadata import base
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import wsgi
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index d27b018c2..4808f9ce9 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -90,6 +90,15 @@ class APIMapper(routes.Mapper):
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
+ def connect(self, *args, **kargs):
+ # NOTE(vish): Default the format part of a route to only accept json
+ # and xml so it doesn't eat all characters after a '.'
+ # in the url.
+ kargs.setdefault('requirements', {})
+ if not kargs['requirements'].get('format'):
+ kargs['requirements']['format'] = 'json|xml'
+ return routes.Mapper.connect(self, *args, **kargs)
+
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
@@ -180,5 +189,5 @@ class APIRouter(base_wsgi.Router):
resource.register_actions(controller)
resource.register_extensions(controller)
- def _setup_routes(self, mapper, ext_mgr):
- raise NotImplementedError
+ def _setup_routes(self, mapper, ext_mgr, init_only):
+ raise NotImplementedError()
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 78064012b..4d680fa94 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -23,7 +23,6 @@ import webob.exc
from nova.api.openstack import wsgi
from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import log as logging
from nova import wsgi as base_wsgi
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 50ac76179..38247a55b 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -30,7 +30,6 @@ from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import quota
diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py
index e6704951f..dc18a01a5 100644
--- a/nova/api/openstack/compute/__init__.py
+++ b/nova/api/openstack/compute/__init__.py
@@ -32,7 +32,6 @@ from nova.api.openstack.compute import server_metadata
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import versions
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/__init__.py b/nova/api/openstack/compute/contrib/__init__.py
index e6a1e9c4d..fc2943380 100644
--- a/nova/api/openstack/compute/contrib/__init__.py
+++ b/nova/api/openstack/compute/contrib/__init__.py
@@ -23,7 +23,6 @@ It can't be called 'extensions' because that causes namespacing problems.
from nova.api.openstack import extensions
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 1bac0851d..3614d7ba1 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -23,8 +23,8 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py
index c05a208a3..c53fdabd5 100644
--- a/nova/api/openstack/compute/contrib/certificates.py
+++ b/nova/api/openstack/compute/contrib/certificates.py
@@ -20,7 +20,7 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.cert.rpcapi
-from nova import flags
+from nova import config
from nova import network
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index 77d88144a..302ff7cf3 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -24,7 +24,6 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova import network
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/config_drive.py b/nova/api/openstack/compute/contrib/config_drive.py
index ac294f660..b972cf9b9 100644
--- a/nova/api/openstack/compute/contrib/config_drive.py
+++ b/nova/api/openstack/compute/contrib/config_drive.py
@@ -21,7 +21,7 @@ from nova.api.openstack.compute import servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
+from nova import config
authorize = extensions.soft_extension_authorizer('compute', 'config_drive')
diff --git a/nova/api/openstack/compute/contrib/deferred_delete.py b/nova/api/openstack/compute/contrib/deferred_delete.py
index ea7ac00f9..14b608677 100644
--- a/nova/api/openstack/compute/contrib/deferred_delete.py
+++ b/nova/api/openstack/compute/contrib/deferred_delete.py
@@ -43,7 +43,8 @@ class DeferredDeleteController(wsgi.Controller):
try:
self.compute_api.restore(context, instance)
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error),
+ raise webob.exc.HTTPRequestEntityTooLarge(
+ explanation=unicode(error),
headers={'Retry-After': 0})
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index 15f6456ea..06847c727 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -18,8 +18,8 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
+from nova import config
from nova import db
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index f7ccdcbff..55ff930f2 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -18,7 +18,7 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/fixed_ips.py b/nova/api/openstack/compute/contrib/fixed_ips.py
new file mode 100644
index 000000000..da20f5334
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/fixed_ips.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova import db
+from nova import exception
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'fixed_ips')
+
+
+class FixedIPController(object):
+ def show(self, req, id):
+ """Return data about the given fixed ip."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ fixed_ip = db.fixed_ip_get_by_address_detailed(context, id)
+ except exception.FixedIpNotFoundForAddress as ex:
+ raise webob.exc.HTTPNotFound(explanation=str(ex))
+
+ fixed_ip_info = {"fixed_ip": {}}
+ if fixed_ip[1] is None:
+ msg = _("Fixed IP %s has been deleted") % id
+ raise webob.exc.HTTPNotFound(explanation=msg)
+
+ fixed_ip_info['fixed_ip']['cidr'] = fixed_ip[1]['cidr']
+ fixed_ip_info['fixed_ip']['address'] = fixed_ip[0]['address']
+
+ if fixed_ip[2]:
+ fixed_ip_info['fixed_ip']['hostname'] = fixed_ip[2]['hostname']
+ fixed_ip_info['fixed_ip']['host'] = fixed_ip[2]['host']
+ else:
+ fixed_ip_info['fixed_ip']['hostname'] = None
+ fixed_ip_info['fixed_ip']['host'] = None
+
+ return fixed_ip_info
+
+ def action(self, req, id, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ if 'reserve' in body:
+ return self._set_reserved(context, id, True)
+ elif 'unreserve' in body:
+ return self._set_reserved(context, id, False)
+ else:
+ raise webob.exc.HTTPBadRequest(
+ explanation="No valid action specified")
+
+ def _set_reserved(self, context, address, reserved):
+ try:
+ fixed_ip = db.fixed_ip_get_by_address(context, address)
+ db.fixed_ip_update(context, fixed_ip['address'],
+ {'reserved': reserved})
+ except exception.FixedIpNotFoundForAddress:
+ msg = _("Fixed IP %s not found") % address
+ raise webob.exc.HTTPNotFound(explanation=msg)
+
+ return webob.exc.HTTPAccepted()
+
+
+class Fixed_ips(extensions.ExtensionDescriptor):
+ """Fixed IPs support"""
+
+ name = "FixedIPs"
+ alias = "os-fixed-ips"
+ namespace = "http://docs.openstack.org/compute/ext/fixed_ips/api/v2"
+ updated = "2012-10-18T13:25:27-06:00"
+
+ def __init__(self, ext_mgr):
+ ext_mgr.register(self)
+
+ def get_resources(self):
+ member_actions = {'action': 'POST'}
+ resources = []
+ resource = extensions.ResourceExtension('os-fixed-ips',
+ FixedIPController(),
+ member_actions=member_actions)
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/fping.py b/nova/api/openstack/compute/contrib/fping.py
new file mode 100644
index 000000000..890119570
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/fping.py
@@ -0,0 +1,162 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import os
+import time
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova import compute
+from nova import config
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'fping')
+authorize_all_tenants = extensions.extension_authorizer(
+ 'compute', 'fping:all_tenants')
+fping_opts = [
+ cfg.StrOpt("fping_path",
+ default="/usr/sbin/fping",
+ help="Full path to fping."),
+]
+
+CONF = config.CONF
+CONF.register_opts(fping_opts)
+
+
+class FpingController(object):
+
+ def __init__(self, network_api=None):
+ self.compute_api = compute.API()
+ self.last_call = {}
+
+ def check_fping(self):
+ if not os.access(CONF.fping_path, os.X_OK):
+ raise exc.HTTPServiceUnavailable(
+ explanation=_("fping utility is not found."))
+
+ @staticmethod
+ def fping(ips):
+ fping_ret = utils.execute(CONF.fping_path, *ips,
+ check_exit_code=False)
+ if not fping_ret:
+ return set()
+ alive_ips = set()
+ for line in fping_ret[0].split("\n"):
+ ip = line.split(" ", 1)[0]
+ if "alive" in line:
+ alive_ips.add(ip)
+ return alive_ips
+
+ @staticmethod
+ def _get_instance_ips(context, instance):
+ ret = []
+ for network in common.get_networks_for_instance(
+ context, instance).values():
+ all_ips = itertools.chain(network["ips"], network["floating_ips"])
+ ret += [ip["address"] for ip in all_ips]
+ return ret
+
+ def index(self, req):
+ context = req.environ["nova.context"]
+ search_opts = dict(deleted=False)
+ if "all_tenants" in req.GET:
+ authorize_all_tenants(context)
+ else:
+ authorize(context)
+ if context.project_id:
+ search_opts["project_id"] = context.project_id
+ else:
+ search_opts["user_id"] = context.user_id
+ self.check_fping()
+ include = req.GET.get("include", None)
+ if include:
+ include = set(include.split(","))
+ exclude = set()
+ else:
+ include = None
+ exclude = req.GET.get("exclude", None)
+ if exclude:
+ exclude = set(exclude.split(","))
+ else:
+ exclude = set()
+
+ instance_list = self.compute_api.get_all(
+ context, search_opts=search_opts)
+ ip_list = []
+ instance_ips = {}
+ instance_projects = {}
+
+ for instance in instance_list:
+ uuid = instance["uuid"]
+ if uuid in exclude or (include is not None and
+ uuid not in include):
+ continue
+ ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
+ instance_ips[uuid] = ips
+ instance_projects[uuid] = instance["project_id"]
+ ip_list += ips
+ alive_ips = self.fping(ip_list)
+ res = []
+ for instance_uuid, ips in instance_ips.iteritems():
+ res.append({
+ "id": instance_uuid,
+ "project_id": instance_projects[instance_uuid],
+ "alive": bool(set(ips) & alive_ips),
+ })
+ return {"servers": res}
+
+ def show(self, req, id):
+ try:
+ context = req.environ["nova.context"]
+ authorize(context)
+ self.check_fping()
+ instance = self.compute_api.get(context, id)
+ ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
+ alive_ips = self.fping(ips)
+ return {
+ "server": {
+ "id": instance["uuid"],
+ "project_id": instance["project_id"],
+ "alive": bool(set(ips) & alive_ips),
+ }
+ }
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+
+class Fping(extensions.ExtensionDescriptor):
+ """Fping Management Extension."""
+
+ name = "Fping"
+ alias = "os-fping"
+ namespace = "http://docs.openstack.org/compute/ext/fping/api/v1.1"
+ updated = "2012-07-06T00:00:00+00:00"
+
+ def get_resources(self):
+ res = extensions.ResourceExtension(
+ "os-fping",
+ FpingController())
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 237872405..32582548c 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -23,9 +23,9 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api as compute_api
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 7c98cb8d6..31a66b8fe 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -23,7 +23,6 @@ import webob.exc
from nova.api.openstack import extensions
from nova import config
from nova import db
-from nova import flags
from nova import utils
CONF = config.CONF
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py
index 4537e1ec7..e7b7ac8a6 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/networks.py
@@ -21,8 +21,8 @@ import webob
from webob import exc
from nova.api.openstack import extensions
+from nova import config
from nova import exception
-from nova import flags
from nova import network
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py
index 054eaf870..b76b54e8b 100644
--- a/nova/api/openstack/compute/contrib/rescue.py
+++ b/nova/api/openstack/compute/contrib/rescue.py
@@ -23,7 +23,6 @@ from nova.api.openstack import wsgi
from nova import compute
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index b86397694..ac5206a63 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -27,9 +27,9 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
new file mode 100644
index 000000000..6c3a6688d
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -0,0 +1,141 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import webob.exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import config
+from nova import db
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova import utils
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'services')
+CONF = config.CONF
+
+
+class ServicesIndexTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('services')
+ elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
+ elem.set('binary')
+ elem.set('host')
+ elem.set('zone')
+ elem.set('status')
+ elem.set('state')
+ elem.set('update_at')
+
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('host')
+ root.set('host')
+ root.set('service')
+ root.set('disabled')
+
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class ServiceController(object):
+ @wsgi.serializers(xml=ServicesIndexTemplate)
+ def index(self, req):
+ """
+ Return a list of all running services. Filter by host & service name.
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ now = timeutils.utcnow()
+ services = db.service_get_all(context)
+
+ host = ''
+ if 'host' in req.GET:
+ host = req.GET['host']
+ service = ''
+ if 'service' in req.GET:
+ service = req.GET['service']
+ if host:
+ services = [s for s in services if s['host'] == host]
+ if service:
+ services = [s for s in services if s['binary'] == service]
+
+ svcs = []
+ for svc in services:
+ delta = now - (svc['updated_at'] or svc['created_at'])
+ alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
+ art = (alive and "up") or "down"
+ active = 'enabled'
+ if svc['disabled']:
+ active = 'disabled'
+ svcs.append({"binary": svc['binary'], 'host': svc['host'],
+ 'zone': svc['availability_zone'],
+ 'status': active, 'state': art,
+ 'updated_at': svc['updated_at']})
+ return {'services': svcs}
+
+ @wsgi.serializers(xml=ServicesUpdateTemplate)
+ def update(self, req, id, body):
+ """Enable/Disable scheduling for a service"""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ if id == "enable":
+ disabled = False
+ elif id == "disable":
+ disabled = True
+ else:
+ raise webob.exc.HTTPNotFound("Unknown action")
+
+ try:
+ host = body['host']
+ service = body['service']
+ except (TypeError, KeyError):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ try:
+ svc = db.service_get_by_args(context, host, service)
+ if not svc:
+ raise webob.exc.HTTPNotFound('Unknown service')
+
+ db.service_update(context, svc['id'], {'disabled': disabled})
+ except exception.ServiceNotFound:
+ raise webob.exc.HTTPNotFound("service not found")
+
+ return {'host': host, 'service': service, 'disabled': disabled}
+
+
+class Services(extensions.ExtensionDescriptor):
+ """Services support"""
+
+ name = "Services"
+ alias = "os-services"
+ namespace = "http://docs.openstack.org/compute/ext/services/api/v2"
+ updated = "2012-10-28T00:00:00-00:00"
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('os-services',
+ ServiceController())
+ resources.append(resource)
+ return resources
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index f6e9a63f6..c51faef1b 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -22,8 +22,8 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.compute import api
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
authorize_show = extensions.extension_authorizer('compute',
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 1de6134ad..c58a70d3c 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -24,8 +24,8 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova import volume
@@ -127,20 +127,20 @@ class CommonDeserializer(wsgi.MetadataXMLDeserializer):
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
- volume = {}
+ vol = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
- volume[attr] = volume_node.getAttribute(attr)
+ vol[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
- volume['metadata'] = self.extract_metadata(metadata_node)
+ vol['metadata'] = self.extract_metadata(metadata_node)
- return volume
+ return vol
class CreateDeserializer(CommonDeserializer):
@@ -153,8 +153,8 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = minidom.parseString(string)
- volume = self._extract_volume(dom)
- return {'body': {'volume': volume}}
+ vol = self._extract_volume(dom)
+ return {'body': {'volume': vol}}
class VolumeController(wsgi.Controller):
@@ -185,8 +185,8 @@ class VolumeController(wsgi.Controller):
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
- volume = self.volume_api.get(context, id)
- self.volume_api.delete(context, volume)
+ vol = self.volume_api.get(context, id)
+ self.volume_api.delete(context, vol)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -581,7 +581,7 @@ class SnapshotController(wsgi.Controller):
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
- volume = self.volume_api.get(context, volume_id)
+ vol = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
LOG.audit(_("Create snapshot from volume %s"), volume_id,
@@ -593,12 +593,12 @@ class SnapshotController(wsgi.Controller):
if utils.bool_from_str(force):
new_snapshot = self.volume_api.create_snapshot_force(context,
- volume,
+ vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
else:
new_snapshot = self.volume_api.create_snapshot(context,
- volume,
+ vol,
snapshot.get('display_name'),
snapshot.get('display_description'))
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index c46a6b034..4805ba4e6 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -17,7 +17,6 @@
from nova.api.openstack import extensions as base_extensions
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 3bc817076..375bbc4d5 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -19,8 +19,8 @@ from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
+from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 0c280618e..bdd0e0916 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -19,8 +19,8 @@ from nova.api.openstack import common
from nova.api.openstack.compute.views import images as views_images
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+from nova import config
from nova import exception
-from nova import flags
import nova.image.glance
from nova.openstack.common import log as logging
import nova.utils
diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py
index ec9759759..a01066d0e 100644
--- a/nova/api/openstack/compute/ips.py
+++ b/nova/api/openstack/compute/ips.py
@@ -22,7 +22,7 @@ from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index c0ef65670..767280a45 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -212,6 +212,7 @@ DEFAULT_LIMITS = [
Limit("PUT", "*", ".*", 10, PER_MINUTE),
Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
Limit("DELETE", "*", ".*", 100, PER_MINUTE),
+ Limit("GET", "*/os-fping", "^/os-fping", 12, PER_HOUR),
]
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index d8d2f1c28..2c22ccc48 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -32,7 +32,6 @@ from nova import compute
from nova.compute import instance_types
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
diff --git a/nova/api/openstack/compute/views/addresses.py b/nova/api/openstack/compute/views/addresses.py
index ec5fda64a..c5ccb02fd 100644
--- a/nova/api/openstack/compute/views/addresses.py
+++ b/nova/api/openstack/compute/views/addresses.py
@@ -18,7 +18,7 @@
import itertools
from nova.api.openstack import common
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py
index d1d7d008f..228af7c8d 100644
--- a/nova/api/openstack/compute/views/images.py
+++ b/nova/api/openstack/compute/views/images.py
@@ -19,7 +19,6 @@ import os.path
from nova.api.openstack import common
from nova import config
-from nova import flags
from nova import utils
CONF = config.CONF
diff --git a/nova/api/openstack/compute/views/limits.py b/nova/api/openstack/compute/views/limits.py
index 4b72f26e7..4ccf40de7 100644
--- a/nova/api/openstack/compute/views/limits.py
+++ b/nova/api/openstack/compute/views/limits.py
@@ -47,8 +47,6 @@ class ViewBuilder(object):
"ram": ["maxTotalRAMSize"],
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
- "gigabytes": ["maxTotalVolumeGigabytes"],
- "volumes": ["maxTotalVolumes"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],
diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py
index 826c8b4a5..594369490 100644
--- a/nova/api/openstack/compute/views/versions.py
+++ b/nova/api/openstack/compute/views/versions.py
@@ -20,7 +20,6 @@ import os
from nova.api.openstack import common
from nova import config
-from nova import flags
CONF = config.CONF
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 298e98603..b44ebefbb 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -24,8 +24,8 @@ import webob.exc
import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index bfe0ec599..d28345dd8 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -1016,7 +1016,8 @@ class Resource(wsgi.Application):
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
- action not in ['action', 'create', 'delete']):
+ action not in ['action', 'create', 'delete', 'update',
+ 'show']):
# Propagate the error
raise
else:
@@ -1180,8 +1181,9 @@ class Fault(webob.exc.HTTPException):
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
- retry = self.wrapped_exc.headers['Retry-After']
- fault_data[fault_name]['retryAfter'] = retry
+ retry = self.wrapped_exc.headers.get('Retry-After', None)
+ if retry:
+ fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 1d22e74fc..95fa381ff 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -22,7 +22,6 @@ import webob.dec
import webob.exc
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi
diff --git a/nova/block_device.py b/nova/block_device.py
index fa59fe325..bf923ecc0 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -18,7 +18,6 @@
import re
from nova import config
-from nova import flags
CONF = config.CONF
diff --git a/nova/cert/manager.py b/nova/cert/manager.py
index d23a15450..0d0e795c7 100644
--- a/nova/cert/manager.py
+++ b/nova/cert/manager.py
@@ -26,8 +26,8 @@ Cert manager manages x509 certificates.
import base64
+from nova import config
from nova import crypto
-from nova import flags
from nova import manager
from nova.openstack.common import log as logging
diff --git a/nova/cert/rpcapi.py b/nova/cert/rpcapi.py
index f6ba11579..7665f544c 100644
--- a/nova/cert/rpcapi.py
+++ b/nova/cert/rpcapi.py
@@ -19,7 +19,6 @@ Client side of the cert manager RPC API.
"""
from nova import config
-from nova import flags
import nova.openstack.common.rpc.proxy
CONF = config.CONF
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 414954670..5a74f1f8b 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -32,7 +32,6 @@ from nova import config
from nova import crypto
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index b770778aa..c52b80e02 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -18,11 +18,11 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.compute import <foo>' elsewhere.
-import nova.flags
+import nova.config
import nova.openstack.common.importutils
def API(*args, **kwargs):
importutils = nova.openstack.common.importutils
- cls = importutils.import_class(nova.flags.FLAGS.compute_api_class)
+ cls = importutils.import_class(nova.config.CONF.compute_api_class)
return cls(*args, **kwargs)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 46f30e10b..6da04c97b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -27,6 +27,7 @@ import re
import string
import time
import urllib
+import uuid
from nova import block_device
from nova.compute import instance_types
@@ -40,7 +41,6 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
-from nova import flags
from nova.image import glance
from nova import network
from nova import notifications
@@ -59,7 +59,6 @@ from nova import volume
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
@@ -140,7 +139,7 @@ class API(base.Base):
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = security_group_api or SecurityGroupAPI()
- self.sgh = importutils.import_object(FLAGS.security_group_handler)
+ self.sgh = importutils.import_object(CONF.security_group_handler)
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -310,7 +309,7 @@ class API(base.Base):
ramdisk_id = image['properties'].get('ramdisk_id')
# Force to None if using null_kernel
- if kernel_id == str(FLAGS.null_kernel):
+ if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
@@ -334,7 +333,7 @@ class API(base.Base):
availability_zone, forced_host = availability_zone.split(':')
if not availability_zone:
- availability_zone = FLAGS.default_schedule_zone
+ availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host
@@ -679,7 +678,7 @@ class API(base.Base):
if not instance.get('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
- instance['uuid'] = str(utils.gen_uuid())
+ instance['uuid'] = str(uuid.uuid4())
instance['launch_index'] = 0
instance['vm_state'] = vm_states.BUILDING
@@ -732,6 +731,10 @@ class API(base.Base):
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
+ # ensure_default security group is called before the instance
+ # is created so the creation of the default security group is
+ # proxied to the sgh.
+ self.security_group_api.ensure_default(context)
instance = self.db.instance_create(context, instance)
self._populate_instance_for_bdm(context, instance,
@@ -1305,7 +1308,7 @@ class API(base.Base):
key = key[len(prefix):]
# Skip properties that are non-inheritable
- if key in FLAGS.non_inheritable_image_properties:
+ if key in CONF.non_inheritable_image_properties:
continue
# By using setdefault, we ensure that the properties set
@@ -1564,13 +1567,13 @@ class API(base.Base):
task_state=task_states.RESIZE_REVERTING,
expected_task_state=None)
+ self.db.migration_update(elevated, migration_ref['id'],
+ {'status': 'reverting'})
+
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
- self.db.migration_update(elevated, migration_ref['id'],
- {'status': 'reverted'})
-
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
@@ -1588,14 +1591,14 @@ class API(base.Base):
task_state=None,
expected_task_state=None)
+ self.db.migration_update(elevated, migration_ref['id'],
+ {'status': 'confirming'})
+
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['source_compute'],
reservations=reservations)
- self.db.migration_update(elevated, migration_ref['id'],
- {'status': 'confirmed'})
-
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
@@ -1749,7 +1752,7 @@ class API(base.Base):
filter_properties = {'ignore_hosts': []}
- if not FLAGS.allow_resize_to_same_host:
+ if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
args = {
@@ -1805,6 +1808,10 @@ class API(base.Base):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
+ def get_backdoor_port(self, context, host):
+ """Retrieve backdoor port"""
+ return self.compute_rpcapi.get_backdoor_port(context, host)
+
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
@@ -2117,7 +2124,7 @@ class AggregateAPI(base.Base):
"""Creates the model for the aggregate."""
zones = [s.availability_zone for s in
self.db.service_get_all_by_topic(context,
- FLAGS.compute_topic)]
+ CONF.compute_topic)]
if availability_zone in zones:
values = {"name": aggregate_name,
"availability_zone": availability_zone}
@@ -2188,9 +2195,10 @@ class AggregateAPI(base.Base):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
service = self.db.service_get_all_compute_by_host(context, host)[0]
+ aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host)
self.compute_rpcapi.remove_aggregate_host(context,
- aggregate_id=aggregate_id, host_param=host, host=host)
+ aggregate=aggregate, host_param=host, host=host)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
@@ -2303,7 +2311,7 @@ class SecurityGroupAPI(base.Base):
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
- self.sgh = importutils.import_object(FLAGS.security_group_handler)
+ self.sgh = importutils.import_object(CONF.security_group_handler)
def validate_property(self, value, property, allowed):
"""
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 6415ae187..c4828b823 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -17,7 +17,6 @@
Claim objects for use with resource tracking.
"""
-from nova import context
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
@@ -29,6 +28,9 @@ COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
class NopClaim(object):
"""For use with compute drivers that do not support resource tracking"""
+ def __init__(self, migration=None):
+ self.migration = migration
+
@property
def disk_gb(self):
return 0
@@ -184,3 +186,35 @@ class Claim(NopClaim):
LOG.info(msg, instance=self.instance)
return can_claim
+
+
+class ResizeClaim(Claim):
+ """Claim used for holding resources for an incoming resize/migration
+ operation.
+ """
+ def __init__(self, instance, instance_type, tracker):
+ super(ResizeClaim, self).__init__(instance, tracker)
+ self.instance_type = instance_type
+ self.migration = None
+
+ @property
+ def disk_gb(self):
+ return (self.instance_type['root_gb'] +
+ self.instance_type['ephemeral_gb'])
+
+ @property
+ def memory_mb(self):
+ return self.instance_type['memory_mb']
+
+ @property
+ def vcpus(self):
+ return self.instance_type['vcpus']
+
+ @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
+ def abort(self):
+ """Compute operation requiring claimed resources has failed or
+ been aborted.
+ """
+ LOG.debug(_("Aborting claim: %s") % self, instance=self.instance)
+ self.tracker.abort_resize_claim(self.instance['uuid'],
+ self.instance_type)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index fd796bd91..c35065a8f 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,15 +21,16 @@
"""Built-in instance properties."""
import re
+import uuid
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
@@ -40,7 +41,7 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
"""Creates instance types."""
if flavorid is None:
- flavorid = utils.gen_uuid()
+ flavorid = uuid.uuid4()
if swap is None:
swap = 0
if rxtx_factor is None:
@@ -86,6 +87,9 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
kwargs['flavorid'] = unicode(flavorid)
# ensure is_public attribute is boolean
+ if not utils.is_valid_boolstr(is_public):
+ msg = _("is_public must be a boolean")
+ raise exception.InvalidInput(reason=msg)
kwargs['is_public'] = utils.bool_from_str(is_public)
try:
@@ -126,7 +130,7 @@ get_all_flavors = get_all_types
def get_default_instance_type():
"""Get the default instance type."""
- name = FLAGS.default_instance_type
+ name = CONF.default_instance_type
return get_instance_type_by_name(name)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c7b63df27..0ad3cfc77 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -40,6 +40,7 @@ import socket
import sys
import time
import traceback
+import uuid
from eventlet import greenthread
@@ -52,9 +53,9 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
import nova.context
from nova import exception
-from nova import flags
from nova.image import glance
from nova import manager
from nova import network
@@ -140,8 +141,8 @@ compute_opts = [
help="Generate periodic compute.instance.exists notifications"),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(compute_opts)
+CONF = config.CONF
+CONF.register_opts(compute_opts)
QUOTAS = quota.QUOTAS
@@ -226,18 +227,52 @@ class ComputeVirtAPI(virtapi.VirtAPI):
def instance_get_all_by_host(self, context, host):
return self._compute.db.instance_get_all_by_host(context, host)
+ def aggregate_get_by_host(self, context, host, key=None):
+ return self._compute.db.aggregate_get_by_host(context, host, key=key)
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ return self._compute.db.aggregate_metadata_get(context, aggregate_id)
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ return self._compute.db.aggregate_metadata_add(context, aggregate_id,
+ metadata,
+ set_delete=set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ return self._compute.db.aggregate_metadata_delete(context,
+ aggregate_id, key)
+
+ def security_group_get_by_instance(self, context, instance_uuid):
+ return self._compute.db.security_group_get_by_instance(context,
+ instance_uuid)
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group_id):
+ return self._compute.db.security_group_rule_get_by_security_group(
+ context, security_group_id)
+
+ def provider_fw_rule_get_all(self, context):
+ return self._compute.db.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return self._compute.db.agent_build_get_by_triple(context,
+ hypervisor,
+ os,
+ architecture)
+
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.14'
+ RPC_API_VERSION = '2.17'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
# and re-document the module docstring
if not compute_driver:
- compute_driver = FLAGS.compute_driver
+ compute_driver = CONF.compute_driver
if not compute_driver:
LOG.error(_("Compute driver option required, but not specified"))
@@ -258,7 +293,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api = network.API()
self.volume_api = volume.API()
self.network_manager = importutils.import_object(
- FLAGS.network_manager, host=kwargs.get('host', None))
+ CONF.network_manager, host=kwargs.get('host', None))
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._last_info_cache_heal = 0
@@ -269,15 +304,24 @@ class ComputeManager(manager.SchedulerDependentManager):
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
- self.resource_tracker = resource_tracker.ResourceTracker(self.host,
- self.driver)
+ self._resource_tracker_dict = {}
+
+ def _get_resource_tracker(self, nodename):
+ rt = self._resource_tracker_dict.get(nodename)
+ if not rt:
+ rt = resource_tracker.ResourceTracker(self.host,
+ self.driver,
+ nodename)
+ self._resource_tracker_dict[nodename] = rt
+ return rt
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
- self.resource_tracker.update_usage(context, instance_ref)
+ rt = self._get_resource_tracker(instance_ref.get('node'))
+ rt.update_usage(context, instance_ref)
notifications.send_update(context, old_ref, instance_ref)
return instance_ref
@@ -297,7 +341,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = nova.context.get_admin_context()
instances = self.db.instance_get_all_by_host(context, self.host)
- if FLAGS.defer_iptables_apply:
+ if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
try:
@@ -326,8 +370,8 @@ class ComputeManager(manager.SchedulerDependentManager):
legacy_net_info = self._legacy_nw_info(net_info)
self.driver.plug_vifs(instance, legacy_net_info)
- if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
- or FLAGS.start_guests_on_host_boot):
+ if ((expect_running and CONF.resume_guests_state_on_host_boot)
+ or CONF.start_guests_on_host_boot):
LOG.info(
_('Rebooting instance after nova-compute restart.'),
locals(), instance=instance)
@@ -357,7 +401,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'firewall rules'), instance=instance)
finally:
- if FLAGS.defer_iptables_apply:
+ if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
self._report_driver_status(context)
@@ -378,6 +422,10 @@ class ComputeManager(manager.SchedulerDependentManager):
except exception.NotFound:
return power_state.NOSTATE
+ def get_backdoor_port(self, context):
+ """Return backdoor port for eventlet_backdoor"""
+ return self.backdoor_port
+
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
@@ -386,8 +434,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
#TODO(mdragon): perhaps make this variable by console_type?
return rpc.queue_get_for(context,
- FLAGS.console_topic,
- FLAGS.console_host)
+ CONF.console_topic,
+ CONF.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@@ -520,10 +568,10 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "create.start",
extra_usage_info=extra_usage_info)
network_info = None
+ rt = self._get_resource_tracker(instance.get('node'))
try:
limits = filter_properties.get('limits', {})
- with self.resource_tracker.instance_claim(context, instance,
- limits):
+ with rt.instance_claim(context, instance, limits):
network_info = self._allocate_network(context, instance,
requested_networks)
@@ -635,7 +683,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
- timeout = FLAGS.instance_build_timeout
+ timeout = CONF.instance_build_timeout
if timeout == 0:
return
@@ -651,13 +699,13 @@ class ComputeManager(manager.SchedulerDependentManager):
def _update_access_ip(self, context, instance, nw_info):
"""Update the access ip values for a given instance.
- If FLAGS.default_access_ip_network_name is set, this method will
+ If CONF.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose from,
an arbitrary one will be chosen.
"""
- network_name = FLAGS.default_access_ip_network_name
+ network_name = CONF.default_access_ip_network_name
if not network_name:
return
@@ -749,7 +797,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=None)
- is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
+ is_vpn = instance['image_ref'] == str(CONF.vpn_image_id)
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
@@ -807,7 +855,7 @@ class ComputeManager(manager.SchedulerDependentManager):
extra_usage_info=None):
# NOTE(sirp): The only thing this wrapper function does extra is handle
# the passing in of `self.host`. Ordinarily this will just be
- # `FLAGS.host`, but `Manager`'s gets a chance to override this in its
+ # `CONF.host`, but `Manager`'s gets a chance to override this in its
# `__init__`.
compute_utils.notify_about_instance_usage(
context, instance, event_suffix, network_info=network_info,
@@ -1363,7 +1411,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if new_pass is None:
# Generate a random password
- new_pass = utils.generate_password(FLAGS.password_length)
+ new_pass = utils.generate_password(CONF.password_length)
max_tries = 10
@@ -1440,6 +1488,27 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)
self.driver.inject_file(instance, path, file_contents)
+ def _get_rescue_image_ref(self, context, instance):
+ """Determine what image should be used to boot the rescue VM. """
+ system_meta = self.db.instance_system_metadata_get(
+ context, instance['uuid'])
+
+ rescue_image_ref = system_meta.get('image_base_image_ref')
+
+ # 1. First try to use base image associated with instance's current
+ # image.
+ #
+ # The idea here is to provide the customer with a rescue environment
+ # which they are familiar with. So, if they built their instance off of
+ # a Debian image, their rescue VM will also be Debian.
+ if rescue_image_ref:
+ return rescue_image_ref
+
+ # 2. As a last resort, use instance's current image
+ LOG.warn(_('Unable to find a different image to use for rescue VM,'
+ ' using instance\'s current image'))
+ return instance['image_ref']
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
@@ -1452,15 +1521,19 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
- utils.generate_password(FLAGS.password_length))
+ utils.generate_password(CONF.password_length))
network_info = self._get_instance_nw_info(context, instance)
- image_meta = _get_image_meta(context, instance['image_ref'])
+
+ # Boot the instance using the 'base' image instead of the user's
+ # current (possibly broken) image
+ rescue_image_ref = self._get_rescue_image_ref(context, instance)
+ rescue_image_meta = _get_image_meta(context, rescue_image_ref)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.rescue(context, instance,
- self._legacy_nw_info(network_info), image_meta,
- admin_password)
+ self._legacy_nw_info(network_info),
+ rescue_image_meta, admin_password)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
@@ -1523,6 +1596,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.confirm_migration(migration, instance,
self._legacy_nw_info(network_info))
+ rt = self._get_resource_tracker(instance.get('node'))
+ rt.confirm_resize(context, migration)
+
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
@@ -1567,6 +1643,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self._terminate_volume_connections(context, instance)
+ rt = self._get_resource_tracker(instance.get('node'))
+ rt.revert_resize(context, migration, status='reverted_dest')
+
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration['source_compute'],
reservations)
@@ -1637,8 +1716,8 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.ACTIVE,
task_state=None)
- self.db.migration_update(elevated, migration['id'],
- {'status': 'reverted'})
+ rt = self._get_resource_tracker(instance.get('node'))
+ rt.revert_resize(context, migration)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
@@ -1655,6 +1734,29 @@ class ComputeManager(manager.SchedulerDependentManager):
if reservations:
QUOTAS.rollback(context, reservations)
+ def _prep_resize(self, context, image, instance, instance_type,
+ reservations, request_spec, filter_properties):
+
+ if not filter_properties:
+ filter_properties = {}
+
+ same_host = instance['host'] == self.host
+ if same_host and not CONF.allow_resize_to_same_host:
+ self._set_instance_error_state(context, instance['uuid'])
+ msg = _('destination same as source!')
+ raise exception.MigrationError(msg)
+
+ limits = filter_properties.get('limits', {})
+ rt = self._get_resource_tracker(instance.get('node'))
+ with rt.resize_claim(context, instance, instance_type, limits=limits) \
+ as claim:
+ migration_ref = claim.migration
+
+ LOG.audit(_('Migrating'), context=context,
+ instance=instance)
+ self.compute_rpcapi.resize_instance(context, instance,
+ migration_ref, image, instance_type, reservations)
+
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
@@ -1672,35 +1774,9 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
-
try:
- same_host = instance['host'] == self.host
- if same_host and not FLAGS.allow_resize_to_same_host:
- self._set_instance_error_state(context, instance['uuid'])
- msg = _('destination same as source!')
- raise exception.MigrationError(msg)
-
- # TODO(russellb): no-db-compute: Send the old instance type
- # info that is needed via rpc so db access isn't required
- # here.
- old_instance_type_id = instance['instance_type_id']
- old_instance_type = instance_types.get_instance_type(
- old_instance_type_id)
-
- migration_ref = self.db.migration_create(context.elevated(),
- {'instance_uuid': instance['uuid'],
- 'source_compute': instance['host'],
- 'dest_compute': self.host,
- 'dest_host': self.driver.get_host_ip_addr(),
- 'old_instance_type_id': old_instance_type['id'],
- 'new_instance_type_id': instance_type['id'],
- 'status': 'pre-migrating'})
-
- LOG.audit(_('Migrating'), context=context,
- instance=instance)
- self.compute_rpcapi.resize_instance(context, instance,
- migration_ref, image, reservations)
-
+ self._prep_resize(context, image, instance, instance_type,
+ reservations, request_spec, filter_properties)
except Exception:
# try to re-schedule the resize elsewhere:
self._reschedule_resize_or_reraise(context, image, instance,
@@ -1754,15 +1830,17 @@ class ComputeManager(manager.SchedulerDependentManager):
@reverts_task_state
@wrap_instance_fault
def resize_instance(self, context, instance, image,
- reservations=None, migration=None, migration_id=None):
+ reservations=None, migration=None, migration_id=None,
+ instance_type=None):
"""Starts the migration of a running instance to another host."""
elevated = context.elevated()
if not migration:
migration = self.db.migration_get(elevated, migration_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
- instance_type_ref = self.db.instance_type_get(context,
- migration['new_instance_type_id'])
+ if not instance_type:
+ instance_type = self.db.instance_type_get(context,
+ migration['new_instance_type_id'])
network_info = self._get_instance_nw_info(context, instance)
@@ -1770,9 +1848,9 @@ class ComputeManager(manager.SchedulerDependentManager):
migration['id'],
{'status': 'migrating'})
- self._instance_update(context, instance['uuid'],
- task_state=task_states.RESIZE_MIGRATING,
- expected_task_state=task_states.RESIZE_PREP)
+ instance = self._instance_update(context, instance['uuid'],
+ task_state=task_states.RESIZE_MIGRATING,
+ expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
@@ -1782,7 +1860,7 @@ class ComputeManager(manager.SchedulerDependentManager):
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration['dest_host'],
- instance_type_ref, self._legacy_nw_info(network_info),
+ instance_type, self._legacy_nw_info(network_info),
block_device_info)
self._terminate_volume_connections(context, instance)
@@ -1794,11 +1872,11 @@ class ComputeManager(manager.SchedulerDependentManager):
migration['id'],
{'status': 'post-migrating'})
- self._instance_update(context, instance['uuid'],
- host=migration['dest_compute'],
- task_state=task_states.RESIZE_MIGRATED,
- expected_task_state=task_states.
- RESIZE_MIGRATING)
+ instance = self._instance_update(context, instance['uuid'],
+ host=migration['dest_compute'],
+ task_state=task_states.RESIZE_MIGRATED,
+ expected_task_state=task_states.
+ RESIZE_MIGRATING)
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
@@ -1843,7 +1921,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance)
- self._instance_update(context, instance['uuid'],
+ instance = self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_FINISH,
expected_task_state=task_states.RESIZE_MIGRATED)
@@ -2114,14 +2192,14 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug(_("Getting vnc console"), instance=instance)
- token = str(utils.gen_uuid())
+ token = str(uuid.uuid4())
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
- access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
+ access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
- access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
+ access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
@@ -2319,7 +2397,7 @@ class ComputeManager(manager.SchedulerDependentManager):
and None otherwise.
"""
src_compute_info = self._get_compute_info(ctxt, instance['host'])
- dst_compute_info = self._get_compute_info(ctxt, FLAGS.host)
+ dst_compute_info = self._get_compute_info(ctxt, CONF.host)
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
@@ -2626,7 +2704,7 @@ class ComputeManager(manager.SchedulerDependentManager):
If anything errors, we don't care. It's possible the instance
has been deleted, etc.
"""
- heal_interval = FLAGS.heal_instance_info_cache_interval
+ heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
@@ -2669,25 +2747,25 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _poll_rebooting_instances(self, context):
- if FLAGS.reboot_timeout > 0:
+ if CONF.reboot_timeout > 0:
instances = self.db.instance_get_all_hung_in_rebooting(
- context, FLAGS.reboot_timeout)
- self.driver.poll_rebooting_instances(FLAGS.reboot_timeout,
+ context, CONF.reboot_timeout)
+ self.driver.poll_rebooting_instances(CONF.reboot_timeout,
instances)
@manager.periodic_task
def _poll_rescued_instances(self, context):
- if FLAGS.rescue_timeout > 0:
- self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+ if CONF.rescue_timeout > 0:
+ self.driver.poll_rescued_instances(CONF.rescue_timeout)
@manager.periodic_task
def _poll_unconfirmed_resizes(self, context):
- if FLAGS.resize_confirm_window > 0:
+ if CONF.resize_confirm_window > 0:
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
- context, FLAGS.resize_confirm_window, self.host)
+ context, CONF.resize_confirm_window, self.host)
migrations_info = dict(migration_count=len(migrations),
- confirm_window=FLAGS.resize_confirm_window)
+ confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
@@ -2736,7 +2814,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _instance_usage_audit(self, context):
- if FLAGS.instance_usage_audit:
+ if CONF.instance_usage_audit:
if not compute_utils.has_audit_been_run(context, self.host):
begin, end = utils.last_completed_audit_period()
instances = self.db.instance_get_active_by_window_joined(
@@ -2786,7 +2864,7 @@ class ComputeManager(manager.SchedulerDependentManager):
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
- FLAGS.bandwidth_poll_interval):
+ CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
@@ -2852,13 +2930,15 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _report_driver_status(self, context):
curr_time = time.time()
- if curr_time - self._last_host_check > FLAGS.host_state_interval:
+ if curr_time - self._last_host_check > CONF.host_state_interval:
self._last_host_check = curr_time
LOG.info(_("Updating host status"))
# This will grab info about the host and queue it
# to be sent to the Schedulers.
capabilities = self.driver.get_host_stats(refresh=True)
- capabilities['host_ip'] = FLAGS.my_ip
+ for capability in (capabilities if isinstance(capabilities, list)
+ else [capabilities]):
+ capability['host_ip'] = CONF.my_ip
self.update_service_capabilities(capabilities)
@manager.periodic_task(ticks_between_runs=10)
@@ -3000,9 +3080,9 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
- interval = FLAGS.reclaim_instance_interval
+ interval = CONF.reclaim_instance_interval
if interval <= 0:
- LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
+ LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
return
instances = self.db.instance_get_all_by_host(context, self.host)
@@ -3027,10 +3107,16 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
"""
- self.resource_tracker.update_available_resource(context)
+ new_resource_tracker_dict = {}
+ nodenames = self.driver.get_available_nodes()
+ for nodename in nodenames:
+ rt = self._get_resource_tracker(nodename)
+ rt.update_available_resource(context)
+ new_resource_tracker_dict[nodename] = rt
+ self._resource_tracker_dict = new_resource_tracker_dict
@manager.periodic_task(
- ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
+ ticks_between_runs=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
@@ -3051,7 +3137,7 @@ class ComputeManager(manager.SchedulerDependentManager):
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
- action = FLAGS.running_deleted_instance_action
+ action = CONF.running_deleted_instance_action
if action == "noop":
return
@@ -3079,7 +3165,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._cleanup_volumes(context, instance['uuid'], bdms)
else:
raise Exception(_("Unrecognized value '%(action)s'"
- " for FLAGS.running_deleted_"
+ " for CONF.running_deleted_"
"instance_action"), locals(),
instance=instance)
@@ -3089,7 +3175,7 @@ class ComputeManager(manager.SchedulerDependentManager):
should be pushed down to the virt layer for efficiency.
"""
def deleted_instance(instance):
- timeout = FLAGS.running_deleted_instance_timeout
+ timeout = CONF.running_deleted_instance_timeout
present = instance.name in present_name_labels
erroneously_running = instance.deleted and present
old_enough = (not instance.deleted_at or
@@ -3131,10 +3217,12 @@ class ComputeManager(manager.SchedulerDependentManager):
aggregate['id'], host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- def remove_aggregate_host(self, context, aggregate_id,
- host, slave_info=None):
+ def remove_aggregate_host(self, context, host, slave_info=None,
+ aggregate=None, aggregate_id=None):
"""Removes a host from a physical hypervisor pool."""
- aggregate = self.db.aggregate_get(context, aggregate_id)
+ if not aggregate:
+ aggregate = self.db.aggregate_get(context, aggregate_id)
+
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
@@ -3143,17 +3231,17 @@ class ComputeManager(manager.SchedulerDependentManager):
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context, self.db.aggregate_host_add,
- aggregate.id, host,
+ aggregate['id'], host,
isinstance(e, exception.AggregateError))
@manager.periodic_task(
- ticks_between_runs=FLAGS.image_cache_manager_interval)
+ ticks_between_runs=CONF.image_cache_manager_interval)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
- if FLAGS.image_cache_manager_interval == 0:
+ if CONF.image_cache_manager_interval == 0:
return
all_instances = self.db.instance_get_all(context)
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index e4a65c081..8c799573d 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -20,17 +20,19 @@ model.
"""
from nova.compute import claims
+from nova.compute import instance_types
+from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
-from nova import utils
resource_tracker_opts = [
cfg.IntOpt('reserved_host_disk_mb', default=0,
@@ -42,8 +44,8 @@ resource_tracker_opts = [
help='Class that will manage stats for the local compute host')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(resource_tracker_opts)
+CONF = config.CONF
+CONF.register_opts(resource_tracker_opts)
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE
@@ -54,12 +56,14 @@ class ResourceTracker(object):
are built and destroyed.
"""
- def __init__(self, host, driver):
+ def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
+ self.nodename = nodename
self.compute_node = None
- self.stats = importutils.import_object(FLAGS.compute_stats_class)
+ self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
+ self.tracked_migrations = {}
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def instance_claim(self, context, instance_ref, limits=None):
@@ -108,10 +112,69 @@ class ResourceTracker(object):
else:
raise exception.ComputeResourcesUnavailable()
+ @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
+ def resize_claim(self, context, instance_ref, instance_type, limits=None):
+ """Indicate that resources are needed for a resize operation to this
+ compute host.
+ :param context: security context
+ :param instance_ref: instance to reserve resources for
+ :param instance_type: new instance_type being resized to
+ :param limits: Dict of oversubscription limits for memory, disk,
+ and CPUs.
+ :returns: A Claim ticket representing the reserved resources. This
+ should be turned into finalize a resource claim or free
+ resources after the compute operation is finished.
+ """
+ if self.disabled:
+ # compute_driver doesn't support resource tracking, just
+ # generate the migration record and continue the resize:
+ migration_ref = self._create_migration(context, instance_ref,
+ instance_type)
+ return claims.NopClaim(migration=migration_ref)
+
+ claim = claims.ResizeClaim(instance_ref, instance_type, self)
+
+ if claim.test(self.compute_node, limits):
+
+ migration_ref = self._create_migration(context, instance_ref,
+ instance_type)
+ claim.migration = migration_ref
+
+ # Mark the resources in-use for the resize landing on this
+ # compute host:
+ self._update_usage_from_migration(self.compute_node, migration_ref)
+ self._update(context, self.compute_node)
+
+ return claim
+
+ else:
+ raise exception.ComputeResourcesUnavailable()
+
+ def _create_migration(self, context, instance, instance_type):
+ """Create a migration record for the upcoming resize. This should
+ be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
+ claim will not be lost if the audit process starts.
+ """
+ # TODO(russellb): no-db-compute: Send the old instance type
+ # info that is needed via rpc so db access isn't required
+ # here.
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ return db.migration_create(context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'source_compute': instance['host'],
+ 'dest_compute': self.host,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': instance_type['id'],
+ 'status': 'pre-migrating'})
+
def _set_instance_host(self, context, instance_uuid):
"""Tag the instance as belonging to this host. This should be done
- while the COMPUTE_RESOURCES_SEMPAHORE is being held so the resource
- claim will not be lost if the audit process starts.
+ while the COMPUTE_RESOURCES_SEMPAHORE is held so the resource claim
+ will not be lost if the audit process starts.
"""
values = {'host': self.host, 'launched_on': self.host}
(old_ref, instance_ref) = db.instance_update_and_get_original(context,
@@ -129,6 +192,18 @@ class ResourceTracker(object):
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
+ def abort_resize_claim(self, instance_uuid, instance_type):
+ """Remove usage for an incoming migration"""
+ if instance_uuid in self.tracked_migrations:
+ migration, itype = self.tracked_migrations.pop(instance_uuid)
+
+ if instance_type['id'] == migration['new_instance_type_id']:
+ self.stats.update_stats_for_migration(itype, sign=-1)
+ self._update_usage(self.compute_node, itype, sign=-1)
+
+ ctxt = context.get_admin_context()
+ self._update(ctxt, self.compute_node)
+
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def update_usage(self, context, instance):
"""Update the resource usage and stats after a change in an
@@ -137,9 +212,10 @@ class ResourceTracker(object):
if self.disabled:
return
+ uuid = instance['uuid']
+
# don't update usage for this instance unless it submitted a resource
# claim first:
- uuid = instance['uuid']
if uuid in self.tracked_instances:
self._update_usage_from_instance(self.compute_node, instance)
self._update(context.elevated(), self.compute_node)
@@ -157,7 +233,8 @@ class ResourceTracker(object):
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
- resources = self.driver.get_available_resource()
+ LOG.audit(_("Auditing locally available compute resources"))
+ resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.audit(_("Virt driver does not support "
@@ -169,11 +246,18 @@ class ResourceTracker(object):
self._report_hypervisor_resource_view(resources)
- # Grab all instances assigned to this host:
- instances = db.instance_get_all_by_host(context, self.host)
+ # Grab all instances assigned to this node:
+ instances = db.instance_get_all_by_host_and_node(context, self.host,
+ self.nodename)
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(resources, instances)
+
+ # Grab all in-progress migrations:
+ migrations = db.migration_get_in_progress_by_host(context, self.host)
+
+ self._update_usage_from_migrations(resources, migrations)
+
self._report_final_resource_view(resources)
self._sync_compute_node(context, resources)
@@ -187,9 +271,12 @@ class ResourceTracker(object):
# no service record, disable resource
return
- compute_node_ref = service['compute_node']
- if compute_node_ref:
- self.compute_node = compute_node_ref[0]
+ compute_node_refs = service['compute_node']
+ if compute_node_refs:
+ for cn in compute_node_refs:
+ if cn.get('hypervisor_hostname') == self.nodename:
+ self.compute_node = cn
+ break
if not self.compute_node:
# Need to create the ComputeNode record:
@@ -254,6 +341,104 @@ class ResourceTracker(object):
self.compute_node['id'], values, prune_stats)
self.compute_node = dict(compute_node)
+ def confirm_resize(self, context, migration, status='confirmed'):
+ """Cleanup usage for a confirmed resize"""
+ elevated = context.elevated()
+ db.migration_update(elevated, migration['id'],
+ {'status': status})
+ self.update_available_resource(elevated)
+
+ def revert_resize(self, context, migration, status='reverted'):
+ """Cleanup usage for a reverted resize"""
+ self.confirm_resize(context, migration, status)
+
+ def _update_usage(self, resources, usage, sign=1):
+ resources['memory_mb_used'] += sign * usage['memory_mb']
+ resources['local_gb_used'] += sign * usage['root_gb']
+ resources['local_gb_used'] += sign * usage['ephemeral_gb']
+
+ # free ram and disk may be negative, depending on policy:
+ resources['free_ram_mb'] = (resources['memory_mb'] -
+ resources['memory_mb_used'])
+ resources['free_disk_gb'] = (resources['local_gb'] -
+ resources['local_gb_used'])
+
+ resources['running_vms'] = self.stats.num_instances
+ resources['vcpus_used'] = self.stats.num_vcpus_used
+
+ def _update_usage_from_migration(self, resources, migration):
+ """Update usage for a single migration. The record may
+ represent an incoming or outbound migration.
+ """
+ uuid = migration['instance_uuid']
+ LOG.audit("Updating from migration %s" % uuid)
+
+ incoming = (migration['dest_compute'] == self.host)
+ outbound = (migration['source_compute'] == self.host)
+ same_host = (incoming and outbound)
+
+ instance = self.tracked_instances.get(uuid, None)
+ itype = None
+
+ if same_host:
+ # same host resize. record usage for whichever instance type the
+ # instance is *not* in:
+ if (instance['instance_type_id'] ==
+ migration['old_instance_type_id']):
+
+ itype = migration['new_instance_type_id']
+ else:
+ # instance record already has new flavor, hold space for a
+ # possible revert to the old instance type:
+ itype = migration['old_instance_type_id']
+
+ elif incoming and not instance:
+ # instance has not yet migrated here:
+ itype = migration['new_instance_type_id']
+
+ elif outbound and not instance:
+ # instance migrated, but record usage for a possible revert:
+ itype = migration['old_instance_type_id']
+
+ if itype:
+ instance_type = instance_types.get_instance_type(itype)
+ self.stats.update_stats_for_migration(instance_type)
+ self._update_usage(resources, instance_type)
+ resources['stats'] = self.stats
+ self.tracked_migrations[uuid] = (migration, instance_type)
+
+ def _update_usage_from_migrations(self, resources, migrations):
+
+ self.tracked_migrations.clear()
+
+ filtered = {}
+
+ # do some defensive filtering against bad migrations records in the
+ # database:
+ for migration in migrations:
+
+ instance = migration['instance']
+
+ if not instance:
+ # migration referencing deleted instance
+ continue
+
+ uuid = instance['uuid']
+
+ # skip migration if instance isn't in a resize state:
+ if not self._instance_in_resize_state(instance):
+ LOG.warn(_("Instance not resizing, skipping migration."),
+ instance_uuid=uuid)
+ continue
+
+ # filter to most recently updated migration for each instance:
+ m = filtered.get(uuid, None)
+ if not m or migration['updated_at'] >= m['updated_at']:
+ filtered[uuid] = migration
+
+ for migration in filtered.values():
+ self._update_usage_from_migration(resources, migration)
+
def _update_usage_from_instance(self, resources, instance):
"""Update usage for a single instance."""
@@ -262,7 +447,7 @@ class ResourceTracker(object):
is_deleted_instance = instance['vm_state'] == vm_states.DELETED
if is_new_instance:
- self.tracked_instances[uuid] = 1
+ self.tracked_instances[uuid] = jsonutils.to_primitive(instance)
sign = 1
if is_deleted_instance:
@@ -274,18 +459,7 @@ class ResourceTracker(object):
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
# new instance, update compute node resource usage:
- resources['memory_mb_used'] += sign * instance['memory_mb']
- resources['local_gb_used'] += sign * instance['root_gb']
- resources['local_gb_used'] += sign * instance['ephemeral_gb']
-
- # free ram and disk may be negative, depending on policy:
- resources['free_ram_mb'] = (resources['memory_mb'] -
- resources['memory_mb_used'])
- resources['free_disk_gb'] = (resources['local_gb'] -
- resources['local_gb_used'])
-
- resources['running_vms'] = self.stats.num_instances
- resources['vcpus_used'] = self.stats.num_vcpus_used
+ self._update_usage(resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
resources['stats'] = self.stats
@@ -302,8 +476,8 @@ class ResourceTracker(object):
self.stats.clear()
# set some intiial values, reserve room for host/hypervisor:
- resources['local_gb_used'] = FLAGS.reserved_host_disk_mb / 1024
- resources['memory_mb_used'] = FLAGS.reserved_host_memory_mb
+ resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
+ resources['memory_mb_used'] = CONF.reserved_host_memory_mb
resources['vcpus_used'] = 0
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
@@ -323,3 +497,17 @@ class ResourceTracker(object):
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
+
+ def _instance_in_resize_state(self, instance):
+ vm = instance['vm_state']
+ task = instance['task_state']
+
+ if vm == vm_states.RESIZED:
+ return True
+
+ if (vm == vm_states.ACTIVE and task in [task_states.RESIZE_PREP,
+ task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH]):
+ return True
+
+ return False
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 5bf17adcd..94551ce2a 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -18,14 +18,13 @@
Client side of the compute RPC API.
"""
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
import nova.openstack.common.rpc.proxy
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
def _compute_topic(topic, ctxt, host, instance):
@@ -142,6 +141,9 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.12 - Remove migration_id, add migration to revert_resize
2.13 - Remove migration_id, add migration to finish_revert_resize
2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
+ 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
+ 2.16 - Add instance_type to resize_instance
+ 2.17 - Add get_backdoor_port()
'''
#
@@ -156,7 +158,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def __init__(self):
super(ComputeAPI, self).__init__(
- topic=FLAGS.compute_topic,
+ topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
@@ -389,7 +391,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
_compute_topic(self.topic, ctxt, host, None))
- def remove_aggregate_host(self, ctxt, aggregate_id, host_param, host,
+ def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
@@ -400,11 +402,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
:param host: This is the host to send the message to.
'''
+ aggregate_p = jsonutils.to_primitive(aggregate)
self.cast(ctxt, self.make_msg('remove_aggregate_host',
- aggregate_id=aggregate_id, host=host_param,
+ aggregate=aggregate_p, host=host_param,
slave_info=slave_info),
topic=_compute_topic(self.topic, ctxt, host, None),
- version='2.2')
+ version='2.15')
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
instance_p = jsonutils.to_primitive(instance)
@@ -431,15 +434,17 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
- def resize_instance(self, ctxt, instance, migration, image,
+ def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None):
topic = _compute_topic(self.topic, ctxt, None, instance)
instance_p = jsonutils.to_primitive(instance)
migration_p = jsonutils.to_primitive(migration)
+ instance_type_p = jsonutils.to_primitive(instance_type)
self.cast(ctxt, self.make_msg('resize_instance',
instance=instance_p, migration=migration_p,
- image=image, reservations=reservations), topic,
- version='2.6')
+ image=image, reservations=reservations,
+ instance_type=instance_type_p), topic,
+ version='2.16')
def resume_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
@@ -545,6 +550,10 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ topic=_compute_topic(self.topic, ctxt, host, None))
+
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
@@ -584,7 +593,7 @@ class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def __init__(self):
super(SecurityGroupAPI, self).__init__(
- topic=FLAGS.compute_topic,
+ topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
def refresh_security_group_rules(self, ctxt, security_group_id, host):
diff --git a/nova/compute/stats.py b/nova/compute/stats.py
index 062fac59f..44b92c6de 100644
--- a/nova/compute/stats.py
+++ b/nova/compute/stats.py
@@ -114,6 +114,10 @@ class Stats(dict):
# save updated I/O workload in stats:
self["io_workload"] = self.io_workload
+ def update_stats_for_migration(self, instance_type, sign=1):
+ x = self.get("num_vcpus_used", 0)
+ self["num_vcpus_used"] = x + (sign * instance_type['vcpus'])
+
def _decrement(self, key):
x = self.get(key, 0)
self[key] = x - 1
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 4a284be64..d0428d67b 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -22,16 +22,16 @@ import traceback
from nova import block_device
from nova.compute import instance_types
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova import notifications
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = log.getLogger(__name__)
@@ -86,7 +86,7 @@ def get_device_name_for_instance(context, instance, device):
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=mappings['root'])
# NOTE(vish): remove this when xenapi is setting default_root_device
- if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+ if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
prefix = '/dev/xvd'
if req_prefix != prefix:
LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())
@@ -101,7 +101,7 @@ def get_device_name_for_instance(context, instance, device):
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
- if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+ if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
instance_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(instance_type_id)
if instance_type['ephemeral_gb']:
@@ -184,11 +184,11 @@ def notify_about_instance_usage(context, instance, event_suffix,
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
:param host: Compute host for the instance, if specified. Default is
- FLAGS.host
+ CONF.host
"""
if not host:
- host = FLAGS.host
+ host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
new file mode 100644
index 000000000..036860dbf
--- /dev/null
+++ b/nova/conductor/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.conductor import api as conductor_api
+import nova.config
+import nova.openstack.common.importutils
+
+
+def API(*args, **kwargs):
+ if nova.config.CONF.conductor.use_local:
+ api = conductor_api.LocalAPI
+ else:
+ api = conductor_api.API
+ return api(*args, **kwargs)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
new file mode 100644
index 000000000..acb412625
--- /dev/null
+++ b/nova/conductor/api.py
@@ -0,0 +1,61 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles all requests to the conductor service"""
+
+from nova.conductor import manager
+from nova.conductor import rpcapi
+from nova import config
+from nova.openstack.common import cfg
+
+conductor_opts = [
+ cfg.BoolOpt('use_local',
+ default=False,
+ help='Perform nova-conductor operations locally'),
+ cfg.StrOpt('topic',
+ default='conductor',
+ help='the topic conductor nodes listen on'),
+ cfg.StrOpt('manager',
+ default='nova.conductor.manager.ConductorManager',
+ help='full class name for the Manager for conductor'),
+]
+conductor_group = cfg.OptGroup(name='conductor',
+ title='Conductor Options')
+CONF = config.CONF
+CONF.register_group(conductor_group)
+CONF.register_opts(conductor_opts, conductor_group)
+
+
+class LocalAPI(object):
+ """A local version of the conductor API that does database updates
+ locally instead of via RPC"""
+
+ def __init__(self):
+ self._manager = manager.ConductorManager()
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database"""
+ return self._manager.instance_update(context, instance_uuid, updates)
+
+
+class API(object):
+ """Conductor API that does updates via RPC to the ConductorManager"""
+
+ def __init__(self):
+ self.conductor_rpcapi = rpcapi.ConductorAPI()
+
+ def instance_update(self, context, instance_uuid, **updates):
+ """Perform an instance update in the database"""
+ return self.conductor_rpcapi.instance_update(context, instance_uuid,
+ updates)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
new file mode 100644
index 000000000..3ffe82645
--- /dev/null
+++ b/nova/conductor/manager.py
@@ -0,0 +1,51 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handles database requests from other nova services"""
+
+from nova import manager
+from nova import notifications
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
+ 'power_state', 'access_ip_v4', 'access_ip_v6',
+ 'launched_at', 'terminated_at', 'host',
+ 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
+ 'instance_type_id',
+ ]
+
+
+class ConductorManager(manager.SchedulerDependentManager):
+ """Mission: TBD"""
+
+ RPC_API_VERSION = '1.0'
+
+ def __init__(self, *args, **kwargs):
+ super(ConductorManager, self).__init__(service_name='conductor',
+ *args, **kwargs)
+
+ def instance_update(self, context, instance_uuid, updates):
+ for key in updates:
+ if key not in allowed_updates:
+ LOG.error(_("Instance update attempted for "
+ "'%(key)s' on %(instance_uuid)s") % locals())
+ raise KeyError("unexpected update keyword '%s'" % key)
+ old_ref, instance_ref = self.db.instance_update_and_get_original(
+ context, instance_uuid, updates)
+ notifications.send_update(context, old_ref, instance_ref)
+ return jsonutils.to_primitive(instance_ref)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
new file mode 100644
index 000000000..7a6508f12
--- /dev/null
+++ b/nova/conductor/rpcapi.py
@@ -0,0 +1,43 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Client side of the conductor RPC API"""
+
+from nova import config
+import nova.openstack.common.rpc.proxy
+
+CONF = config.CONF
+
+
+class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
+ """Client side of the conductor RPC API
+
+ API version history:
+
+ 1.0 - Initial version.
+ """
+
+ BASE_RPC_API_VERSION = '1.0'
+
+ def __init__(self):
+ super(ConductorAPI, self).__init__(
+ topic=CONF.conductor.topic,
+ default_version=self.BASE_RPC_API_VERSION)
+
+ def instance_update(self, context, instance_uuid, updates):
+ return self.call(context,
+ self.make_msg('instance_update',
+ instance_uuid=instance_uuid,
+ updates=updates),
+ topic=self.topic)
diff --git a/nova/config.py b/nova/config.py
index 608a3ee53..c991e82ea 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -17,11 +17,364 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import socket
+
from nova.openstack.common import cfg
CONF = cfg.CONF
+def _get_my_ip():
+ """
+ Returns the actual ip of the local machine.
+
+ This code figures out what source address would be used if some traffic
+ were to be sent out to some well known address on the Internet. In this
+ case, a Google DNS server is used, but the specific address does not
+ matter much. No traffic is actually sent.
+ """
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.error:
+ return "127.0.0.1"
+
+
+core_opts = [
+ cfg.StrOpt('sql_connection',
+ default='sqlite:///$state_path/$sqlite_db',
+ help='The SQLAlchemy connection string used to connect to the '
+ 'database'),
+ cfg.StrOpt('api_paste_config',
+ default="api-paste.ini",
+ help='File name for the paste.deploy config for nova-api'),
+ cfg.StrOpt('pybasedir',
+ default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '../')),
+ help='Directory where the nova python module is installed'),
+ cfg.StrOpt('bindir',
+ default='$pybasedir/bin',
+ help='Directory where nova binaries are installed'),
+ cfg.StrOpt('state_path',
+ default='$pybasedir',
+ help="Top-level directory for maintaining nova's state"),
+ ]
+
+debug_opts = [
+ cfg.BoolOpt('fake_network',
+ default=False,
+ help='If passed, use fake network devices and addresses'),
+ cfg.IntOpt('sql_connection_debug',
+ default=0,
+ help='Verbosity of SQL debugging information. 0=None, '
+ '100=Everything'),
+ cfg.BoolOpt('sql_connection_trace',
+ default=False,
+ help='Add python stack traces to SQL as comment strings'),
+]
+
+CONF.register_cli_opts(core_opts)
+CONF.register_cli_opts(debug_opts)
+
+global_opts = [
+ cfg.StrOpt('my_ip',
+ default=_get_my_ip(),
+ help='ip address of this host'),
+ cfg.ListOpt('region_list',
+ default=[],
+ help='list of region=fqdn pairs separated by commas'),
+ cfg.StrOpt('aws_access_key_id',
+ default='admin',
+ help='AWS Access ID'),
+ cfg.StrOpt('aws_secret_access_key',
+ default='admin',
+ help='AWS Access Key'),
+ cfg.StrOpt('glance_host',
+ default='$my_ip',
+ help='default glance hostname or ip'),
+ cfg.IntOpt('glance_port',
+ default=9292,
+ help='default glance port'),
+ cfg.ListOpt('glance_api_servers',
+ default=['$glance_host:$glance_port'],
+ help='A list of the glance api servers available to nova. '
+ 'Prefix with https:// for ssl-based glance api servers. '
+ '([hostname|ip]:port)'),
+ cfg.BoolOpt('glance_api_insecure',
+ default=False,
+ help='Allow to perform insecure SSL (https) requests to '
+ 'glance'),
+ cfg.IntOpt('glance_num_retries',
+ default=0,
+ help='Number retries when downloading an image from glance'),
+ cfg.IntOpt('s3_port',
+ default=3333,
+ help='port used when accessing the s3 api'),
+ cfg.StrOpt('s3_host',
+ default='$my_ip',
+ help='hostname or ip for openstack to use when accessing '
+ 'the s3 api'),
+ cfg.StrOpt('cert_topic',
+ default='cert',
+ help='the topic cert nodes listen on'),
+ cfg.StrOpt('compute_topic',
+ default='compute',
+ help='the topic compute nodes listen on'),
+ cfg.StrOpt('console_topic',
+ default='console',
+ help='the topic console proxy nodes listen on'),
+ cfg.StrOpt('scheduler_topic',
+ default='scheduler',
+ help='the topic scheduler nodes listen on'),
+ cfg.StrOpt('network_topic',
+ default='network',
+ help='the topic network nodes listen on'),
+ cfg.BoolOpt('api_rate_limit',
+ default=True,
+ help='whether to rate limit the api'),
+ cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute', 'metadata'],
+ help='a list of APIs to enable by default'),
+ cfg.StrOpt('ec2_host',
+ default='$my_ip',
+ help='the ip of the ec2 api server'),
+ cfg.StrOpt('ec2_dmz_host',
+ default='$my_ip',
+ help='the internal ip of the ec2 api server'),
+ cfg.IntOpt('ec2_port',
+ default=8773,
+ help='the port of the ec2 api server'),
+ cfg.StrOpt('ec2_scheme',
+ default='http',
+ help='the protocol to use when connecting to the ec2 api '
+ 'server (http, https)'),
+ cfg.StrOpt('ec2_path',
+ default='/services/Cloud',
+ help='the path prefix used to call the ec2 api server'),
+ cfg.ListOpt('osapi_compute_ext_list',
+ default=[],
+ help='Specify list of extensions to load when using osapi_'
+ 'compute_extension option with nova.api.openstack.'
+ 'compute.contrib.select_extensions'),
+ cfg.MultiStrOpt('osapi_compute_extension',
+ default=[
+ 'nova.api.openstack.compute.contrib.standard_extensions'
+ ],
+ help='osapi compute extension to load'),
+ cfg.StrOpt('osapi_path',
+ default='/v1.1/',
+ help='the path prefix used to call the openstack api server'),
+ cfg.StrOpt('osapi_compute_link_prefix',
+ default=None,
+ help='Base URL that will be presented to users in links '
+ 'to the OpenStack Compute API'),
+ cfg.StrOpt('osapi_glance_link_prefix',
+ default=None,
+ help='Base URL that will be presented to users in links '
+ 'to glance resources'),
+ cfg.IntOpt('osapi_max_limit',
+ default=1000,
+ help='the maximum number of items returned in a single '
+ 'response from a collection resource'),
+ cfg.StrOpt('metadata_host',
+ default='$my_ip',
+ help='the ip for the metadata api server'),
+ cfg.IntOpt('metadata_port',
+ default=8775,
+ help='the port for the metadata api port'),
+ cfg.StrOpt('default_image',
+ default='ami-11111',
+ help='default image to use, testing only'),
+ cfg.StrOpt('default_instance_type',
+ default='m1.small',
+ help='default instance type to use, testing only'),
+ cfg.StrOpt('null_kernel',
+ default='nokernel',
+ help='kernel image that indicates not to use a kernel, but to '
+ 'use a raw disk image instead'),
+ cfg.StrOpt('vpn_image_id',
+ default='0',
+ help='image id used when starting up a cloudpipe vpn server'),
+ cfg.StrOpt('vpn_key_suffix',
+ default='-vpn',
+ help='Suffix to add to project name for vpn key and secgroups'),
+ cfg.StrOpt('sqlite_db',
+ default='nova.sqlite',
+ help='the filename to use with sqlite'),
+ cfg.BoolOpt('sqlite_synchronous',
+ default=True,
+ help='If passed, use synchronous mode for sqlite'),
+ cfg.IntOpt('sql_idle_timeout',
+ default=3600,
+ help='timeout before idle sql connections are reaped'),
+ cfg.IntOpt('sql_max_retries',
+ default=10,
+ help='maximum db connection retries during startup. '
+ '(setting -1 implies an infinite retry count)'),
+ cfg.IntOpt('sql_retry_interval',
+ default=10,
+ help='interval between retries of opening a sql connection'),
+ cfg.IntOpt('sql_pool_size',
+ default=None,
+ help='If set, use this value for pool_size with sqlalchemy'),
+ cfg.IntOpt('sql_max_overflow',
+ default=None,
+ help='If set, use this value for max_overflow with '
+ 'sqlalchemy'),
+ cfg.StrOpt('compute_manager',
+ default='nova.compute.manager.ComputeManager',
+ help='full class name for the Manager for compute'),
+ cfg.StrOpt('console_manager',
+ default='nova.console.manager.ConsoleProxyManager',
+ help='full class name for the Manager for console proxy'),
+ cfg.StrOpt('cert_manager',
+ default='nova.cert.manager.CertManager',
+ help='full class name for the Manager for cert'),
+ cfg.StrOpt('instance_dns_manager',
+ default='nova.network.dns_driver.DNSDriver',
+ help='full class name for the DNS Manager for instance IPs'),
+ cfg.StrOpt('instance_dns_domain',
+ default='',
+ help='full class name for the DNS Zone for instance IPs'),
+ cfg.StrOpt('floating_ip_dns_manager',
+ default='nova.network.dns_driver.DNSDriver',
+ help='full class name for the DNS Manager for floating IPs'),
+ cfg.StrOpt('network_manager',
+ default='nova.network.manager.VlanManager',
+ help='full class name for the Manager for network'),
+ cfg.StrOpt('scheduler_manager',
+ default='nova.scheduler.manager.SchedulerManager',
+ help='full class name for the Manager for scheduler'),
+ cfg.StrOpt('host',
+ default=socket.getfqdn(),
+ help='Name of this node. This can be an opaque identifier. '
+ 'It is not necessarily a hostname, FQDN, or IP address. '
+ 'However, the node name must be valid within '
+ 'an AMQP key, and if using ZeroMQ, a valid '
+ 'hostname, FQDN, or IP address'),
+ cfg.StrOpt('node_availability_zone',
+ default='nova',
+ help='availability zone of this node'),
+ cfg.ListOpt('memcached_servers',
+ default=None,
+ help='Memcached servers or None for in process cache.'),
+ cfg.StrOpt('instance_usage_audit_period',
+ default='month',
+ help='time period to generate instance usages for. '
+ 'Time period must be hour, day, month or year'),
+ cfg.IntOpt('bandwidth_poll_interval',
+ default=600,
+ help='interval to pull bandwidth usage info'),
+ cfg.BoolOpt('start_guests_on_host_boot',
+ default=False,
+ help='Whether to restart guests when the host reboots'),
+ cfg.BoolOpt('resume_guests_state_on_host_boot',
+ default=False,
+ help='Whether to start guests that were running before the '
+ 'host rebooted'),
+ cfg.StrOpt('default_ephemeral_format',
+ default=None,
+ help='The default format an ephemeral_volume will be '
+ 'formatted with on creation.'),
+ cfg.StrOpt('rootwrap_config',
+ default="/etc/nova/rootwrap.conf",
+ help='Path to the rootwrap configuration file to use for '
+ 'running commands as root'),
+ cfg.StrOpt('network_driver',
+ default='nova.network.linux_net',
+ help='Driver to use for network creation'),
+ cfg.BoolOpt('use_ipv6',
+ default=False,
+ help='use ipv6'),
+ cfg.BoolOpt('enable_instance_password',
+ default=True,
+ help='Allows use of instance password during '
+ 'server creation'),
+ cfg.IntOpt('password_length',
+ default=12,
+ help='Length of generated instance admin passwords'),
+ cfg.BoolOpt('monkey_patch',
+ default=False,
+ help='Whether to log monkey patching'),
+ cfg.ListOpt('monkey_patch_modules',
+ default=[
+ 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
+ 'nova.compute.api:nova.notifier.api.notify_decorator'
+ ],
+ help='List of modules/decorators to monkey patch'),
+ cfg.BoolOpt('allow_resize_to_same_host',
+ default=False,
+ help='Allow destination machine to match source for resize. '
+ 'Useful when testing in single-host environments.'),
+ cfg.IntOpt('reclaim_instance_interval',
+ default=0,
+ help='Interval in seconds for reclaiming deleted instances'),
+ cfg.IntOpt('zombie_instance_updated_at_window',
+ default=172800,
+ help='Number of seconds zombie instances are cleaned up.'),
+ cfg.IntOpt('service_down_time',
+ default=60,
+ help='maximum time since last check-in for up service'),
+ cfg.StrOpt('default_schedule_zone',
+ default=None,
+ help='availability zone to use when user doesn\'t specify one'),
+ cfg.ListOpt('isolated_images',
+ default=[],
+ help='Images to run on isolated host'),
+ cfg.ListOpt('isolated_hosts',
+ default=[],
+ help='Host reserved for specific images'),
+ cfg.StrOpt('cache_images',
+ default='all',
+ help='Cache glance images locally. `all` will cache all'
+ ' images, `some` will only cache images that have the'
+ ' image_property `cache_in_nova=True`, and `none` turns'
+ ' off caching entirely'),
+ cfg.BoolOpt('use_cow_images',
+ default=True,
+ help='Whether to use cow images'),
+ cfg.StrOpt('compute_api_class',
+ default='nova.compute.api.API',
+ help='The full class name of the compute API class to use'),
+ cfg.StrOpt('network_api_class',
+ default='nova.network.api.API',
+ help='The full class name of the network API class to use'),
+ cfg.StrOpt('volume_api_class',
+ default='nova.volume.cinder.API',
+ help='The full class name of the volume API class to use'),
+ cfg.StrOpt('security_group_handler',
+ default='nova.network.sg.NullSecurityGroupHandler',
+ help='The full class name of the security group handler class'),
+ cfg.StrOpt('default_access_ip_network_name',
+ default=None,
+ help='Name of network to use to set access ips for instances'),
+ cfg.StrOpt('auth_strategy',
+ default='noauth',
+ help='The strategy to use for auth: noauth or keystone.'),
+ cfg.ListOpt('non_inheritable_image_properties',
+ default=['cache_in_nova',
+ 'bittorrent'],
+ help='These are image properties which a snapshot should not'
+ ' inherit from an instance'),
+ cfg.BoolOpt('defer_iptables_apply',
+ default=False,
+ help='Whether to batch up the application of IPTables rules'
+ ' during a host restart and apply all at the end of the'
+ ' init phase'),
+ cfg.StrOpt('compute_driver',
+ help='Driver to use for controlling virtualization. Options '
+ 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
+ 'fake.FakeDriver, baremetal.BareMetalDriver, '
+ 'vmwareapi.VMWareESXDriver'),
+]
+
+CONF.register_opts(global_opts)
+
+
def parse_args(argv, default_config_files=None):
CONF.disable_interspersed_args()
return argv[:1] + CONF(argv[1:],
diff --git a/nova/console/api.py b/nova/console/api.py
index 5a9294ce7..0ddea69d8 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -18,14 +18,13 @@
"""Handles ConsoleProxy API requests."""
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova.console import rpcapi as console_rpcapi
from nova.db import base
-from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class API(base.Base):
@@ -42,7 +41,7 @@ class API(base.Base):
def delete_console(self, context, instance_uuid, console_uuid):
console = self.db.console_get(context, console_uuid, instance_uuid)
- topic = rpc.queue_get_for(context, FLAGS.console_topic,
+ topic = rpc.queue_get_for(context, CONF.console_topic,
console['pool']['host'])
rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
rpcapi.remove_console(context, console['id'])
diff --git a/nova/console/manager.py b/nova/console/manager.py
index faaf58d0c..b4732844a 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -20,8 +20,8 @@
import socket
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -41,8 +41,8 @@ console_manager_opts = [
help='Publicly visible name for this console host'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(console_manager_opts)
+CONF = config.CONF
+CONF.register_opts(console_manager_opts)
LOG = logging.getLogger(__name__)
@@ -57,7 +57,7 @@ class ConsoleProxyManager(manager.Manager):
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
- console_driver = FLAGS.console_driver
+ console_driver = CONF.console_driver
self.driver = importutils.import_object(console_driver)
super(ConsoleProxyManager, self).__init__(*args, **kwargs)
self.driver.host = self.host
@@ -118,7 +118,7 @@ class ConsoleProxyManager(manager.Manager):
#NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
                          #            xenserver. Thus we need to ask.
- if FLAGS.stub_compute:
+ if CONF.stub_compute:
pool_info = {'address': '127.0.0.1',
'username': 'test',
'password': '1234pass'}
@@ -128,7 +128,7 @@ class ConsoleProxyManager(manager.Manager):
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
- pool_info['public_hostname'] = FLAGS.console_public_hostname
+ pool_info['public_hostname'] = CONF.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
diff --git a/nova/console/rpcapi.py b/nova/console/rpcapi.py
index a1f289bb0..b401c4c4e 100644
--- a/nova/console/rpcapi.py
+++ b/nova/console/rpcapi.py
@@ -18,11 +18,10 @@
Client side of the console RPC API.
"""
-from nova import flags
+from nova import config
import nova.openstack.common.rpc.proxy
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -44,7 +43,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
- topic = topic if topic else FLAGS.console_topic
+ topic = topic if topic else CONF.console_topic
super(ConsoleAPI, self).__init__(
topic=topic,
default_version=self.BASE_RPC_API_VERSION)
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index a8b934677..a10bf9951 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -19,8 +19,8 @@
import base64
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
@@ -35,8 +35,8 @@ vmrc_opts = [
help="number of retries for retrieving VMRC information"),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(vmrc_opts)
+CONF = config.CONF
+CONF.register_opts(vmrc_opts)
class VMRCConsole(object):
@@ -51,7 +51,7 @@ class VMRCConsole(object):
def get_port(self, context):
"""Get available port for consoles."""
- return FLAGS.console_vmrc_port
+ return CONF.console_vmrc_port
def setup_console(self, context, console):
"""Sets up console."""
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index e654780a3..3990b48b9 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -18,8 +18,8 @@
"""VMRC Console Manager."""
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -29,16 +29,16 @@ from nova.virt.vmwareapi import driver as vmwareapi_conn
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-flags.DECLARE('console_driver', 'nova.console.manager')
-flags.DECLARE('console_public_hostname', 'nova.console.manager')
+CONF = config.CONF
+CONF.import_opt('console_driver', 'nova.console.manager')
+CONF.import_opt('console_public_hostname', 'nova.console.manager')
class ConsoleVMRCManager(manager.Manager):
"""Manager to handle VMRC connections for accessing instance consoles."""
def __init__(self, console_driver=None, *args, **kwargs):
- self.driver = importutils.import_object(FLAGS.console_driver)
+ self.driver = importutils.import_object(CONF.console_driver)
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
@@ -54,7 +54,7 @@ class ConsoleVMRCManager(manager.Manager):
pool['address'],
pool['username'],
pool['password'],
- FLAGS.console_vmrc_error_retries)
+ CONF.console_vmrc_error_retries)
self.sessions[pool['id']] = vim_session
return self.sessions[pool['id']]
@@ -137,8 +137,8 @@ class ConsoleVMRCManager(manager.Manager):
pool_info['host'] = self.host
# ESX Address or Proxy Address
public_host_name = pool_info['address']
- if FLAGS.console_public_hostname:
- public_host_name = FLAGS.console_public_hostname
+ if CONF.console_public_hostname:
+ public_host_name = CONF.console_public_hostname
pool_info['public_hostname'] = public_host_name
pool_info['console_type'] = console_type
pool_info['compute_host'] = instance_host
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 60197c766..6bcd85345 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -22,10 +22,10 @@ import signal
from Cheetah import Template
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -49,8 +49,8 @@ xvp_opts = [
help='port for XVP to multiplex VNC connections on'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xvp_opts)
+CONF = config.CONF
+CONF.register_opts(xvp_opts)
LOG = logging.getLogger(__name__)
@@ -58,8 +58,8 @@ class XVPConsoleProxy(object):
"""Sets up XVP config, and manages XVP daemon."""
def __init__(self):
- self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read()
- self.host = FLAGS.host # default, set by manager.
+ self.xvpconf_template = open(CONF.console_xvp_conf_template).read()
+ self.host = CONF.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
@@ -71,7 +71,7 @@ class XVPConsoleProxy(object):
#TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
- return FLAGS.console_xvp_multiplex_port
+ return CONF.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies."""
@@ -104,7 +104,7 @@ class XVPConsoleProxy(object):
LOG.debug('No console pools!')
self._xvp_stop()
return
- conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
+ conf_data = {'multiplex_port': CONF.console_xvp_multiplex_port,
'pools': pools,
'pass_encode': self.fix_console_password}
config = str(Template.Template(self.xvpconf_template,
@@ -113,8 +113,8 @@ class XVPConsoleProxy(object):
self._xvp_restart()
def _write_conf(self, config):
- LOG.debug(_('Re-wrote %s') % FLAGS.console_xvp_conf)
- with open(FLAGS.console_xvp_conf, 'w') as cfile:
+ LOG.debug(_('Re-wrote %s') % CONF.console_xvp_conf)
+ with open(CONF.console_xvp_conf, 'w') as cfile:
cfile.write(config)
def _xvp_stop(self):
@@ -134,9 +134,9 @@ class XVPConsoleProxy(object):
LOG.debug(_('Starting xvp'))
try:
utils.execute('xvp',
- '-p', FLAGS.console_xvp_pid,
- '-c', FLAGS.console_xvp_conf,
- '-l', FLAGS.console_xvp_log)
+ '-p', CONF.console_xvp_pid,
+ '-c', CONF.console_xvp_conf,
+ '-l', CONF.console_xvp_log)
except exception.ProcessExecutionError, err:
LOG.error(_('Error starting xvp: %s') % err)
@@ -151,7 +151,7 @@ class XVPConsoleProxy(object):
def _xvp_pid(self):
try:
- with open(FLAGS.console_xvp_pid, 'r') as pidfile:
+ with open(CONF.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
diff --git a/nova/consoleauth/__init__.py b/nova/consoleauth/__init__.py
index 11253ea91..dbff115ff 100644
--- a/nova/consoleauth/__init__.py
+++ b/nova/consoleauth/__init__.py
@@ -19,7 +19,6 @@
"""Module to authenticate Consoles."""
from nova import config
-from nova import flags
from nova.openstack.common import cfg
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index c772101eb..9a3f03e5a 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -21,7 +21,6 @@
import time
from nova import config
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index 51d28cb04..822f401bc 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -19,7 +19,6 @@ Client side of the consoleauth RPC API.
"""
from nova import config
-from nova import flags
import nova.openstack.common.rpc.proxy
CONF = config.CONF
diff --git a/nova/context.py b/nova/context.py
index 74f7a3c23..094e2bffb 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -20,19 +20,19 @@
"""RequestContext: context for requests that persist through all of nova."""
import copy
+import uuid
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import policy
-from nova import utils
LOG = logging.getLogger(__name__)
def generate_request_id():
- return 'req-' + str(utils.gen_uuid())
+ return 'req-' + str(uuid.uuid4())
class RequestContext(object):
diff --git a/nova/crypto.py b/nova/crypto.py
index ef7b40419..52c892378 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -32,7 +32,6 @@ from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
diff --git a/nova/db/api.py b/nova/db/api.py
index 757f101b3..5b401392a 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -43,8 +43,8 @@ these objects be simple dictionaries.
"""
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova import utils
@@ -59,16 +59,13 @@ db_opts = [
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
- cfg.StrOpt('volume_name_template',
- default='volume-%s',
- help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(db_opts)
+CONF = config.CONF
+CONF.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.api')
@@ -157,15 +154,6 @@ def service_get_all_compute_sorted(context):
return IMPL.service_get_all_compute_sorted(context)
-def service_get_all_volume_sorted(context):
- """Get all volume services sorted by volume count.
-
- :returns: a list of (Service, volume_count) tuples.
-
- """
- return IMPL.service_get_all_volume_sorted(context)
-
-
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
@@ -417,6 +405,13 @@ def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
confirm_window, dest_compute)
+def migration_get_in_progress_by_host(context, host):
+ """Finds all migrations for the given host that are not yet confirmed or
+ reverted.
+ """
+ return IMPL.migration_get_in_progress_by_host(context, host)
+
+
####################
@@ -477,6 +472,11 @@ def fixed_ip_get_by_address(context, address):
return IMPL.fixed_ip_get_by_address(context, address)
+def fixed_ip_get_by_address_detailed(context, address):
+ """Get detailed fixed ip info by address or raise if it does not exist."""
+ return IMPL.fixed_ip_get_by_address_detailed(context, address)
+
+
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
@@ -492,11 +492,6 @@ def fixed_ips_by_virtual_interface(context, vif_id):
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
-def fixed_ip_get_network(context, address):
- """Get a network for a fixed ip by address."""
- return IMPL.fixed_ip_get_network(context, address)
-
-
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
@@ -626,6 +621,11 @@ def instance_get_all_by_host(context, host):
return IMPL.instance_get_all_by_host(context, host)
+def instance_get_all_by_host_and_node(context, host, node):
+ """Get all instances belonging to a node."""
+ return IMPL.instance_get_all_by_host_and_node(context, host, node)
+
+
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
@@ -748,11 +748,6 @@ def key_pair_destroy(context, user_id, name):
return IMPL.key_pair_destroy(context, user_id, name)
-def key_pair_destroy_all_by_user(context, user_id):
- """Destroy all key_pairs by user."""
- return IMPL.key_pair_destroy_all_by_user(context, user_id)
-
-
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
@@ -776,11 +771,6 @@ def network_associate(context, project_id, network_id=None, force=False):
return IMPL.network_associate(context, project_id, network_id, force)
-def network_count(context):
- """Return the number of networks."""
- return IMPL.network_count(context)
-
-
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
@@ -805,11 +795,6 @@ def network_delete_safe(context, network_id):
return IMPL.network_delete_safe(context, network_id)
-def network_create_fixed_ips(context, network_id, num_vpn_clients):
- """Create the ips for the network, reserving sepecified ips."""
- return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
-
-
def network_disassociate(context, network_id):
"""Disassociate the network from project or raise if it does not exist."""
return IMPL.network_disassociate(context, network_id)
@@ -870,16 +855,6 @@ def network_get_all_by_host(context, host):
return IMPL.network_get_all_by_host(context, host)
-def network_get_index(context, network_id):
- """Get non-conflicting index for network."""
- return IMPL.network_get_index(context, network_id)
-
-
-def network_set_cidr(context, network_id, cidr):
- """Set the Classless Inner Domain Routing for the network."""
- return IMPL.network_set_cidr(context, network_id, cidr)
-
-
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
@@ -936,11 +911,6 @@ def quota_update(context, project_id, resource, limit):
return IMPL.quota_update(context, project_id, resource, limit)
-def quota_destroy(context, project_id, resource):
- """Destroy the quota or raise if it does not exist."""
- return IMPL.quota_destroy(context, project_id, resource)
-
-
###################
@@ -964,26 +934,9 @@ def quota_class_update(context, class_name, resource, limit):
return IMPL.quota_class_update(context, class_name, resource, limit)
-def quota_class_destroy(context, class_name, resource):
- """Destroy the quota class or raise if it does not exist."""
- return IMPL.quota_class_destroy(context, class_name, resource)
-
-
-def quota_class_destroy_all_by_name(context, class_name):
- """Destroy all quotas associated with a given quota class."""
- return IMPL.quota_class_destroy_all_by_name(context, class_name)
-
-
###################
-def quota_usage_create(context, project_id, resource, in_use, reserved,
- until_refresh):
- """Create a quota usage for the given project and resource."""
- return IMPL.quota_usage_create(context, project_id, resource,
- in_use, reserved, until_refresh)
-
-
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
@@ -999,11 +952,6 @@ def quota_usage_update(context, project_id, resource, **kwargs):
return IMPL.quota_usage_update(context, project_id, resource, **kwargs)
-def quota_usage_destroy(context, project_id, resource):
- """Destroy the quota usage or raise if it does not exist."""
- return IMPL.quota_usage_destroy(context, project_id, resource)
-
-
###################
@@ -1019,11 +967,6 @@ def reservation_get(context, uuid):
return IMPL.reservation_get(context, uuid)
-def reservation_get_all_by_project(context, project_id):
- """Retrieve all reservations associated with a given project."""
- return IMPL.reservation_get_all_by_project(context, project_id)
-
-
def reservation_destroy(context, uuid):
"""Destroy the reservation or raise if it does not exist."""
return IMPL.reservation_destroy(context, uuid)
@@ -1062,81 +1005,11 @@ def reservation_expire(context):
###################
-def volume_allocate_iscsi_target(context, volume_id, host):
- """Atomically allocate a free iscsi_target from the pool."""
- return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
-
-
-def volume_attached(context, volume_id, instance_id, mountpoint):
- """Ensure that a volume is set as attached."""
- return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
-
-
-def volume_create(context, values):
- """Create a volume from the values dictionary."""
- return IMPL.volume_create(context, values)
-
-
-def volume_data_get_for_project(context, project_id, session=None):
- """Get (volume_count, gigabytes) for project."""
- return IMPL.volume_data_get_for_project(context, project_id,
- session=session)
-
-
-def volume_destroy(context, volume_id):
- """Destroy the volume or raise if it does not exist."""
- return IMPL.volume_destroy(context, volume_id)
-
-
-def volume_detached(context, volume_id):
- """Ensure that a volume is set as detached."""
- return IMPL.volume_detached(context, volume_id)
-
-
-def volume_get(context, volume_id):
- """Get a volume or raise if it does not exist."""
- return IMPL.volume_get(context, volume_id)
-
-
-def volume_get_all(context):
- """Get all volumes."""
- return IMPL.volume_get_all(context)
-
-
-def volume_get_all_by_host(context, host):
- """Get all volumes belonging to a host."""
- return IMPL.volume_get_all_by_host(context, host)
-
-
-def volume_get_all_by_instance_uuid(context, instance_uuid):
- """Get all volumes belonging to an instance."""
- return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
-
-
-def volume_get_all_by_project(context, project_id):
- """Get all volumes belonging to a project."""
- return IMPL.volume_get_all_by_project(context, project_id)
-
-
-def volume_get_by_ec2_id(context, ec2_id):
- """Get a volume by ec2 id."""
- return IMPL.volume_get_by_ec2_id(context, ec2_id)
-
-
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
-def volume_update(context, volume_id, values):
- """Set the given properties on a volume and update it.
-
- Raises NotFound if volume does not exist.
-
- """
- return IMPL.volume_update(context, volume_id, values)
-
-
def get_ec2_volume_id_by_uuid(context, volume_id):
return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
@@ -1164,48 +1037,6 @@ def ec2_snapshot_create(context, snapshot_id, forced_id=None):
####################
-def snapshot_create(context, values):
- """Create a snapshot from the values dictionary."""
- return IMPL.snapshot_create(context, values)
-
-
-def snapshot_destroy(context, snapshot_id):
- """Destroy the snapshot or raise if it does not exist."""
- return IMPL.snapshot_destroy(context, snapshot_id)
-
-
-def snapshot_get(context, snapshot_id):
- """Get a snapshot or raise if it does not exist."""
- return IMPL.snapshot_get(context, snapshot_id)
-
-
-def snapshot_get_all(context):
- """Get all snapshots."""
- return IMPL.snapshot_get_all(context)
-
-
-def snapshot_get_all_by_project(context, project_id):
- """Get all snapshots belonging to a project."""
- return IMPL.snapshot_get_all_by_project(context, project_id)
-
-
-def snapshot_get_all_for_volume(context, volume_id):
- """Get all snapshots for a volume."""
- return IMPL.snapshot_get_all_for_volume(context, volume_id)
-
-
-def snapshot_update(context, snapshot_id, values):
- """Set the given properties on a snapshot and update it.
-
- Raises NotFound if snapshot does not exist.
-
- """
- return IMPL.snapshot_update(context, snapshot_id, values)
-
-
-####################
-
-
def block_device_mapping_create(context, values):
"""Create an entry of block device mapping"""
return IMPL.block_device_mapping_create(context, values)
@@ -1503,11 +1334,6 @@ def instance_system_metadata_get(context, instance_uuid):
return IMPL.instance_system_metadata_get(context, instance_uuid)
-def instance_system_metadata_delete(context, instance_uuid, key):
- """Delete the given system metadata item."""
- IMPL.instance_system_metadata_delete(context, instance_uuid, key)
-
-
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
@@ -1586,80 +1412,6 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
-##################
-
-
-def volume_metadata_get(context, volume_id):
- """Get all metadata for a volume."""
- return IMPL.volume_metadata_get(context, volume_id)
-
-
-def volume_metadata_delete(context, volume_id, key):
- """Delete the given metadata item."""
- IMPL.volume_metadata_delete(context, volume_id, key)
-
-
-def volume_metadata_update(context, volume_id, metadata, delete):
- """Update metadata if it exists, otherwise create it."""
- IMPL.volume_metadata_update(context, volume_id, metadata, delete)
-
-
-##################
-
-
-def volume_type_create(context, values):
- """Create a new volume type."""
- return IMPL.volume_type_create(context, values)
-
-
-def volume_type_get_all(context, inactive=False):
- """Get all volume types."""
- return IMPL.volume_type_get_all(context, inactive)
-
-
-def volume_type_get(context, id):
- """Get volume type by id."""
- return IMPL.volume_type_get(context, id)
-
-
-def volume_type_get_by_name(context, name):
- """Get volume type by name."""
- return IMPL.volume_type_get_by_name(context, name)
-
-
-def volume_type_destroy(context, name):
- """Delete a volume type."""
- return IMPL.volume_type_destroy(context, name)
-
-
-def volume_get_active_by_window(context, begin, end=None, project_id=None):
- """Get all the volumes inside the window.
-
- Specifying a project_id will filter for a certain project."""
- return IMPL.volume_get_active_by_window(context, begin, end, project_id)
-
-
-####################
-
-
-def volume_type_extra_specs_get(context, volume_type_id):
- """Get all extra specs for a volume type."""
- return IMPL.volume_type_extra_specs_get(context, volume_type_id)
-
-
-def volume_type_extra_specs_delete(context, volume_type_id, key):
- """Delete the given extra specs item."""
- IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
-
-
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs):
- """Create or update volume type extra specs. This adds or modifies the
- key/value pairs specified in the extra specs dict argument"""
- IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
- extra_specs)
-
-
###################
@@ -1681,100 +1433,6 @@ def s3_image_create(context, image_uuid):
####################
-def sm_backend_conf_create(context, values):
- """Create a new SM Backend Config entry."""
- return IMPL.sm_backend_conf_create(context, values)
-
-
-def sm_backend_conf_update(context, sm_backend_conf_id, values):
- """Update a SM Backend Config entry."""
- return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
-
-
-def sm_backend_conf_delete(context, sm_backend_conf_id):
- """Delete a SM Backend Config."""
- return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
-
-
-def sm_backend_conf_get(context, sm_backend_conf_id):
- """Get a specific SM Backend Config."""
- return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
-
-
-def sm_backend_conf_get_by_sr(context, sr_uuid):
- """Get a specific SM Backend Config."""
- return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
-
-
-def sm_backend_conf_get_all(context):
- """Get all SM Backend Configs."""
- return IMPL.sm_backend_conf_get_all(context)
-
-
-####################
-
-
-def sm_flavor_create(context, values):
- """Create a new SM Flavor entry."""
- return IMPL.sm_flavor_create(context, values)
-
-
-def sm_flavor_update(context, sm_flavor_id, values):
- """Update a SM Flavor entry."""
- return IMPL.sm_flavor_update(context, sm_flavor_id, values)
-
-
-def sm_flavor_delete(context, sm_flavor_id):
- """Delete a SM Flavor."""
- return IMPL.sm_flavor_delete(context, sm_flavor_id)
-
-
-def sm_flavor_get(context, sm_flavor_id):
- """Get a specific SM Flavor."""
- return IMPL.sm_flavor_get(context, sm_flavor_id)
-
-
-def sm_flavor_get_all(context):
- """Get all SM Flavors."""
- return IMPL.sm_flavor_get_all(context)
-
-
-def sm_flavor_get_by_label(context, sm_flavor_label):
- """Get a specific SM Flavor given label."""
- return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
-
-
-####################
-
-
-def sm_volume_create(context, values):
- """Create a new child Zone entry."""
- return IMPL.sm_volume_create(context, values)
-
-
-def sm_volume_update(context, volume_id, values):
- """Update a child Zone entry."""
- return IMPL.sm_volume_update(context, values)
-
-
-def sm_volume_delete(context, volume_id):
- """Delete a child Zone."""
- return IMPL.sm_volume_delete(context, volume_id)
-
-
-def sm_volume_get(context, volume_id):
- """Get a specific child Zone."""
- return IMPL.sm_volume_get(context, volume_id)
-
-
-def sm_volume_get_all(context):
- """Get all child Zones."""
- return IMPL.sm_volume_get_all(context)
-
-
-####################
-
-
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
diff --git a/nova/db/base.py b/nova/db/base.py
index 0ec6ad6e2..650edd42f 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -18,17 +18,16 @@
"""Base class for classes that need modular database access."""
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import importutils
-
db_driver_opt = cfg.StrOpt('db_driver',
default='nova.db',
help='driver to use for database access')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(db_driver_opt)
+CONF = config.CONF
+CONF.register_opt(db_driver_opt)
class Base(object):
@@ -36,5 +35,5 @@ class Base(object):
def __init__(self, db_driver=None):
if not db_driver:
- db_driver = FLAGS.db_driver
+ db_driver = CONF.db_driver
self.db = importutils.import_module(db_driver) # pylint: disable=C0103
diff --git a/nova/db/migration.py b/nova/db/migration.py
index 76b70e14d..5ffa7cdfb 100644
--- a/nova/db/migration.py
+++ b/nova/db/migration.py
@@ -24,7 +24,7 @@ from nova import utils
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.migration')
-INIT_VERSION = 81
+INIT_VERSION = 132
def db_sync(version=None):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 2f33eda0c..34fbec6d3 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,6 +23,7 @@ import collections
import copy
import datetime
import functools
+import uuid
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
@@ -37,18 +38,17 @@ from sqlalchemy.sql import func
from nova import block_device
from nova.common.sqlalchemyutils import paginate_query
from nova.compute import vm_states
+from nova import config
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
-from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -139,20 +139,6 @@ def require_instance_exists_using_uuid(f):
return wrapper
-def require_volume_exists(f):
- """Decorator to require the specified volume to exist.
-
- Requires the wrapped function to use context and volume_id as
- their first two arguments.
- """
-
- def wrapper(context, volume_id, *args, **kwargs):
- db.volume_get(context, volume_id)
- return f(context, volume_id, *args, **kwargs)
- wrapper.__name__ = f.__name__
- return wrapper
-
-
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
@@ -312,7 +298,7 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
- if (service_ref.topic == FLAGS.compute_topic and
+ if (service_ref.topic == CONF.compute_topic and
service_ref.compute_node):
for c in service_ref.compute_node:
c.delete(session=session)
@@ -369,7 +355,7 @@ def service_get_all_compute_by_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
- filter_by(topic=FLAGS.compute_topic).\
+ filter_by(topic=CONF.compute_topic).\
all()
if not result:
@@ -402,7 +388,7 @@ def service_get_all_compute_sorted(context):
# (SELECT host, SUM(instances.vcpus) AS instance_cores
# FROM instances GROUP BY host) AS inst_cores
# ON services.host = inst_cores.host
- topic = FLAGS.compute_topic
+ topic = CONF.compute_topic
label = 'instance_cores'
subq = model_query(context, models.Instance.host,
func.sum(models.Instance.vcpus).label(label),
@@ -417,24 +403,6 @@ def service_get_all_compute_sorted(context):
@require_admin_context
-def service_get_all_volume_sorted(context):
- session = get_session()
- with session.begin():
- topic = FLAGS.volume_topic
- label = 'volume_gigabytes'
- subq = model_query(context, models.Volume.host,
- func.sum(models.Volume.size).label(label),
- session=session, read_deleted="no").\
- group_by(models.Volume.host).\
- subquery()
- return _service_get_all_topic_subquery(context,
- session,
- topic,
- subq,
- label)
-
-
-@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
@@ -451,7 +419,7 @@ def service_get_by_args(context, host, binary):
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
- if not FLAGS.enable_new_services:
+ if not CONF.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref
@@ -987,24 +955,15 @@ def dnsdomain_register_for_project(context, fqdomain, project):
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
- session = get_session()
- with session.begin():
- session.query(models.DNSDomain).\
- filter_by(domain=fqdomain).\
- delete()
+ model_query(context, models.DNSDomain).\
+ filter_by(domain=fqdomain).\
+ delete()
@require_context
def dnsdomain_list(context):
- session = get_session()
- records = model_query(context, models.DNSDomain,
- session=session, read_deleted="no").\
- all()
- domains = []
- for record in records:
- domains.append(record.domain)
-
- return domains
+ query = model_query(context, models.DNSDomain, read_deleted="no")
+ return [row.domain for row in query.all()]
###################
@@ -1191,6 +1150,30 @@ def fixed_ip_get_by_address(context, address, session=None):
return result
+@require_admin_context
+def fixed_ip_get_by_address_detailed(context, address, session=None):
+ """
+ :returns: a tuple of (models.FixedIp, models.Network, models.Instance)
+ """
+ if not session:
+ session = get_session()
+
+ result = session.query(models.FixedIp, models.Network, models.Instance).\
+ filter_by(address=address).\
+ outerjoin((models.Network,
+ models.Network.id ==
+ models.FixedIp.network_id)).\
+ outerjoin((models.Instance,
+ models.Instance.uuid ==
+ models.FixedIp.instance_uuid)).\
+ first()
+
+ if not result:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ return result
+
+
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
@@ -1228,12 +1211,6 @@ def fixed_ips_by_virtual_interface(context, vif_id):
return result
-@require_admin_context
-def fixed_ip_get_network(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.network
-
-
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
@@ -1271,12 +1248,12 @@ def _virtual_interface_query(context, session=None):
@require_context
-def virtual_interface_get(context, vif_id, session=None):
+def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
- vif_ref = _virtual_interface_query(context, session=session).\
+ vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@@ -1336,10 +1313,9 @@ def virtual_interface_delete(context, vif_id):
:param vif_id: = id of vif to delete
"""
- session = get_session()
- vif_ref = virtual_interface_get(context, vif_id, session)
- with session.begin():
- session.delete(vif_ref)
+ _virtual_interface_query(context).\
+ filter_by(id=vif_id).\
+ delete()
@require_context
@@ -1349,9 +1325,9 @@ def virtual_interface_delete_by_instance(context, instance_uuid):
:param instance_uuid: = uuid of instance
"""
- vif_refs = virtual_interface_get_by_instance(context, instance_uuid)
- for vif_ref in vif_refs:
- virtual_interface_delete(context, vif_ref['id'])
+ _virtual_interface_query(context).\
+ filter_by(instance_uuid=instance_uuid).\
+ delete()
@require_context
@@ -1391,7 +1367,7 @@ def instance_create(context, values):
instance_ref = models.Instance()
if not values.get('uuid'):
- values['uuid'] = str(utils.gen_uuid())
+ values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
@@ -1502,7 +1478,8 @@ def _build_instance_get(context, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type'))
+ options(joinedload('instance_type')).\
+ options(joinedload('system_metadata'))
@require_context
@@ -1535,6 +1512,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
query_prefix = session.query(models.Instance).\
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
+ options(joinedload('system_metadata')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key)))
@@ -1610,7 +1588,7 @@ def regex_filter(query, model, filters):
'oracle': 'REGEXP_LIKE',
'sqlite': 'REGEXP'
}
- db_string = FLAGS.sql_connection.split(':')[0].split('+')[0]
+ db_string = CONF.sql_connection.split(':')[0].split('+')[0]
db_regexp_op = regexp_op_map.get(db_string, 'LIKE')
for filter_name in filters.iterkeys():
try:
@@ -1681,6 +1659,12 @@ def instance_get_all_by_host(context, host):
@require_admin_context
+def instance_get_all_by_host_and_node(context, host, node):
+ return _instance_get_all_query(context).filter_by(host=host).\
+ filter_by(node=node).all()
+
+
+@require_admin_context
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instance_get_all_query(context).filter_by(host=host).\
filter(models.Instance.instance_type_id != type_id).all()
@@ -1823,6 +1807,18 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
with session.begin():
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
+ # TODO(deva): remove extra_specs from here after it is included
+ # in system_metadata. Until then, the baremetal driver
+ # needs extra_specs added to instance[]
+ inst_type_ref = _instance_type_get_query(context, session=session).\
+ filter_by(id=instance_ref['instance_type_id']).\
+ first()
+ if inst_type_ref:
+ instance_ref['extra_specs'] = \
+ _dict_with_extra_specs(inst_type_ref).get('extra_specs', {})
+ else:
+ instance_ref['extra_specs'] = {}
+
if "expected_task_state" in values:
# it is not a db column so always pop out
expected = values.pop("expected_task_state")
@@ -1852,6 +1848,13 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
instance_ref.update(values)
instance_ref.save(session=session)
+ if 'instance_type_id' in values:
+ # NOTE(comstud): It appears that sqlalchemy doesn't refresh
+ # the instance_type model after you update the ID. You end
+ # up with an instance_type model that only has 'id' updated,
+ # but the rest of the model has the data from the old
+ # instance_type.
+ session.refresh(instance_ref['instance_type'])
return (old_instance_ref, instance_ref)
@@ -1978,18 +1981,6 @@ def key_pair_destroy(context, user_id, name):
@require_context
-def key_pair_destroy_all_by_user(context, user_id):
- authorize_user_context(context, user_id)
- session = get_session()
- with session.begin():
- session.query(models.KeyPair).\
- filter_by(user_id=user_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
def key_pair_get(context, user_id, name, session=None):
authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair, session=session).\
@@ -2072,11 +2063,6 @@ def network_associate(context, project_id, network_id=None, force=False):
@require_admin_context
-def network_count(context):
- return model_query(context, models.Network).count()
-
-
-@require_admin_context
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@@ -2098,7 +2084,7 @@ def network_create_safe(context, values):
raise exception.DuplicateVlan(vlan=values['vlan'])
network_ref = models.Network()
- network_ref['uuid'] = str(utils.gen_uuid())
+ network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
@@ -2375,9 +2361,8 @@ def iscsi_target_create_safe(context, values):
@require_context
-def quota_get(context, project_id, resource, session=None):
- result = model_query(context, models.Quota, session=session,
- read_deleted="no").\
+def quota_get(context, project_id, resource):
+ result = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
@@ -2415,28 +2400,21 @@ def quota_create(context, project_id, resource, limit):
@require_admin_context
def quota_update(context, project_id, resource, limit):
- session = get_session()
- with session.begin():
- quota_ref = quota_get(context, project_id, resource, session=session)
- quota_ref.hard_limit = limit
- quota_ref.save(session=session)
-
+ result = model_query(context, models.Quota, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ update({'hard_limit': limit})
-@require_admin_context
-def quota_destroy(context, project_id, resource):
- session = get_session()
- with session.begin():
- quota_ref = quota_get(context, project_id, resource, session=session)
- quota_ref.delete(session=session)
+ if not result:
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
-def quota_class_get(context, class_name, resource, session=None):
- result = model_query(context, models.QuotaClass, session=session,
- read_deleted="no").\
+def quota_class_get(context, class_name, resource):
+ result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
@@ -2474,43 +2452,21 @@ def quota_class_create(context, class_name, resource, limit):
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
- session = get_session()
- with session.begin():
- quota_class_ref = quota_class_get(context, class_name, resource,
- session=session)
- quota_class_ref.hard_limit = limit
- quota_class_ref.save(session=session)
-
-
-@require_admin_context
-def quota_class_destroy(context, class_name, resource):
- session = get_session()
- with session.begin():
- quota_class_ref = quota_class_get(context, class_name, resource,
- session=session)
- quota_class_ref.delete(session=session)
-
-
-@require_admin_context
-def quota_class_destroy_all_by_name(context, class_name):
- session = get_session()
- with session.begin():
- quota_classes = model_query(context, models.QuotaClass,
- session=session, read_deleted="no").\
- filter_by(class_name=class_name).\
- all()
+ result = model_query(context, models.QuotaClass, read_deleted="no").\
+ filter_by(class_name=class_name).\
+ filter_by(resource=resource).\
+ update({'hard_limit': limit})
- for quota_class_ref in quota_classes:
- quota_class_ref.delete(session=session)
+ if not result:
+ raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
-def quota_usage_get(context, project_id, resource, session=None):
- result = model_query(context, models.QuotaUsage, session=session,
- read_deleted="no").\
+def quota_usage_get(context, project_id, resource):
+ result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
@@ -2537,7 +2493,7 @@ def quota_usage_get_all_by_project(context, project_id):
@require_admin_context
-def quota_usage_create(context, project_id, resource, in_use, reserved,
+def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
@@ -2545,49 +2501,37 @@ def quota_usage_create(context, project_id, resource, in_use, reserved,
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
+
quota_usage_ref.save(session=session)
return quota_usage_ref
@require_admin_context
-def quota_usage_update(context, project_id, resource, session=None, **kwargs):
- def do_update(session):
- quota_usage_ref = quota_usage_get(context, project_id, resource,
- session=session)
- if 'in_use' in kwargs:
- quota_usage_ref.in_use = kwargs['in_use']
- if 'reserved' in kwargs:
- quota_usage_ref.reserved = kwargs['reserved']
- if 'until_refresh' in kwargs:
- quota_usage_ref.until_refresh = kwargs['until_refresh']
- quota_usage_ref.save(session=session)
-
- if session:
- # Assume caller started a transaction
- do_update(session)
- else:
- session = get_session()
- with session.begin():
- do_update(session)
+def quota_usage_update(context, project_id, resource, **kwargs):
+ updates = {}
+ if 'in_use' in kwargs:
+ updates['in_use'] = kwargs['in_use']
+ if 'reserved' in kwargs:
+ updates['reserved'] = kwargs['reserved']
+ if 'until_refresh' in kwargs:
+ updates['until_refresh'] = kwargs['until_refresh']
+ result = model_query(context, models.QuotaUsage, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(resource=resource).\
+ update(updates)
-@require_admin_context
-def quota_usage_destroy(context, project_id, resource):
- session = get_session()
- with session.begin():
- quota_usage_ref = quota_usage_get(context, project_id, resource,
- session=session)
- quota_usage_ref.delete(session=session)
+ if not result:
+ raise exception.QuotaUsageNotFound(project_id=project_id)
###################
@require_context
-def reservation_get(context, uuid, session=None):
- result = model_query(context, models.Reservation, session=session,
- read_deleted="no").\
+def reservation_get(context, uuid):
+ result = model_query(context, models.Reservation, read_deleted="no").\
filter_by(uuid=uuid).\
first()
@@ -2597,22 +2541,6 @@ def reservation_get(context, uuid, session=None):
return result
-@require_context
-def reservation_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
-
- rows = model_query(context, models.QuotaUsage, read_deleted="no").\
- filter_by(project_id=project_id).\
- all()
-
- result = {'project_id': project_id}
- for row in rows:
- result.setdefault(row.resource, {})
- result[row.resource][row.uuid] = row.delta
-
- return result
-
-
@require_admin_context
def reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None):
@@ -2629,10 +2557,12 @@ def reservation_create(context, uuid, usage, project_id, resource, delta,
@require_admin_context
def reservation_destroy(context, uuid):
- session = get_session()
- with session.begin():
- reservation_ref = reservation_get(context, uuid, session=session)
- reservation_ref.delete(session=session)
+ result = model_query(context, models.Reservation, read_deleted="no").\
+ filter_by(uuid=uuid).\
+ delete()
+
+ if not result:
+ raise exception.ReservationNotFound(uuid=uuid)
###################
@@ -2671,7 +2601,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
- usages[resource] = quota_usage_create(elevated,
+ usages[resource] = _quota_usage_create(elevated,
context.project_id,
resource,
0, 0,
@@ -2699,7 +2629,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
- usages[res] = quota_usage_create(elevated,
+ usages[res] = _quota_usage_create(elevated,
context.project_id,
res,
0, 0,
@@ -2748,7 +2678,7 @@ def quota_reserve(context, resources, quotas, deltas, expire,
reservations = []
for resource, delta in deltas.items():
reservation = reservation_create(elevated,
- str(utils.gen_uuid()),
+ str(uuid.uuid4()),
usages[resource],
context.project_id,
resource, delta, expire,
@@ -2884,116 +2814,6 @@ def reservation_expire(context):
###################
-@require_admin_context
-def volume_allocate_iscsi_target(context, volume_id, host):
- session = get_session()
- with session.begin():
- iscsi_target_ref = model_query(context, models.IscsiTarget,
- session=session, read_deleted="no").\
- filter_by(volume=None).\
- filter_by(host=host).\
- with_lockmode('update').\
- first()
-
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- if not iscsi_target_ref:
- raise db.NoMoreTargets()
-
- iscsi_target_ref.volume_id = volume_id
- session.add(iscsi_target_ref)
-
- return iscsi_target_ref.target_num
-
-
-@require_admin_context
-def volume_attached(context, volume_id, instance_uuid, mountpoint):
- if not uuidutils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- volume_ref['status'] = 'in-use'
- volume_ref['mountpoint'] = mountpoint
- volume_ref['attach_status'] = 'attached'
- volume_ref['instance_uuid'] = instance_uuid
- volume_ref['attach_time'] = timeutils.utcnow()
- volume_ref.save(session=session)
-
-
-@require_context
-def volume_create(context, values):
- values['volume_metadata'] = _metadata_refs(values.get('metadata'),
- models.VolumeMetadata)
- volume_ref = models.Volume()
- if not values.get('id'):
- values['id'] = str(utils.gen_uuid())
- volume_ref.update(values)
-
- session = get_session()
- with session.begin():
- volume_ref.save(session=session)
-
- return volume_get(context, values['id'], session=session)
-
-
-@require_admin_context
-def volume_data_get_for_project(context, project_id, session=None):
- result = model_query(context,
- func.count(models.Volume.id),
- func.sum(models.Volume.size),
- read_deleted="no",
- session=session).\
- filter_by(project_id=project_id).\
- first()
-
- # NOTE(vish): convert None to 0
- return (result[0] or 0, result[1] or 0)
-
-
-@require_admin_context
-def volume_destroy(context, volume_id):
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- session.query(models.Volume).\
- filter_by(id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- session.query(models.IscsiTarget).\
- filter_by(volume_id=volume_id).\
- update({'volume_id': None})
- session.query(models.VolumeMetadata).\
- filter_by(volume_id=volume_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- return volume_ref
-
-
-@require_admin_context
-def volume_detached(context, volume_id):
- session = get_session()
- with session.begin():
- volume_ref = volume_get(context, volume_id, session=session)
- volume_ref['status'] = 'available'
- volume_ref['mountpoint'] = None
- volume_ref['attach_status'] = 'detached'
- volume_ref['instance_uuid'] = None
- volume_ref['attach_time'] = None
- volume_ref.save(session=session)
-
-
-@require_context
-def _volume_get_query(context, session=None, project_only=False):
- return model_query(context, models.Volume, session=session,
- project_only=project_only).\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type'))
-
-
@require_context
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
@@ -3006,48 +2826,6 @@ def _ec2_snapshot_get_query(context, session=None):
session=session, read_deleted='yes')
-@require_context
-def volume_get(context, volume_id, session=None):
- result = _volume_get_query(context, session=session, project_only=True).\
- filter_by(id=volume_id).\
- first()
-
- if not result:
- raise exception.VolumeNotFound(volume_id=volume_id)
-
- return result
-
-
-@require_admin_context
-def volume_get_all(context):
- return _volume_get_query(context).all()
-
-
-@require_admin_context
-def volume_get_all_by_host(context, host):
- return _volume_get_query(context).filter_by(host=host).all()
-
-
-@require_admin_context
-def volume_get_all_by_instance_uuid(context, instance_uuid):
- result = model_query(context, models.Volume, read_deleted="no").\
- options(joinedload('volume_metadata')).\
- options(joinedload('volume_type')).\
- filter_by(instance_uuid=instance_uuid).\
- all()
-
- if not result:
- return []
-
- return result
-
-
-@require_context
-def volume_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
- return _volume_get_query(context).filter_by(project_id=project_id).all()
-
-
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
@@ -3061,23 +2839,6 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
-def volume_update(context, volume_id, values):
- session = get_session()
- volume_ref = volume_get(context, volume_id, session=session)
- metadata = values.get('metadata')
- if metadata is not None:
- volume_metadata_update(context,
- volume_id,
- values.pop('metadata'),
- delete=True)
- with session.begin():
- volume_ref.update(values)
- volume_ref.save(session=session)
-
- return volume_ref
-
-
-@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatable volume by provided uuid"""
ec2_volume_ref = models.VolumeIdMapping()
@@ -3151,153 +2912,6 @@ def get_snapshot_uuid_by_ec2_id(context, ec2_id, session=None):
return result['uuid']
-####################
-
-def _volume_metadata_get_query(context, volume_id, session=None):
- return model_query(context, models.VolumeMetadata,
- session=session, read_deleted="no").\
- filter_by(volume_id=volume_id)
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_get(context, volume_id):
- rows = _volume_metadata_get_query(context, volume_id).all()
- result = {}
- for row in rows:
- result[row['key']] = row['value']
-
- return result
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_delete(context, volume_id, key):
- _volume_metadata_get_query(context, volume_id).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_get_item(context, volume_id, key, session=None):
- result = _volume_metadata_get_query(context, volume_id, session=session).\
- filter_by(key=key).\
- first()
-
- if not result:
- raise exception.VolumeMetadataNotFound(metadata_key=key,
- volume_id=volume_id)
- return result
-
-
-@require_context
-@require_volume_exists
-def volume_metadata_update(context, volume_id, metadata, delete):
- session = get_session()
-
- # Set existing metadata to deleted if delete argument is True
- if delete:
- original_metadata = volume_metadata_get(context, volume_id)
- for meta_key, meta_value in original_metadata.iteritems():
- if meta_key not in metadata:
- meta_ref = volume_metadata_get_item(context, volume_id,
- meta_key, session)
- meta_ref.update({'deleted': True})
- meta_ref.save(session=session)
-
- meta_ref = None
-
- # Now update all existing items with new values, or create new meta objects
- for meta_key, meta_value in metadata.iteritems():
-
- # update the value whether it exists or not
- item = {"value": meta_value}
-
- try:
- meta_ref = volume_metadata_get_item(context, volume_id,
- meta_key, session)
- except exception.VolumeMetadataNotFound:
- meta_ref = models.VolumeMetadata()
- item.update({"key": meta_key, "volume_id": volume_id})
-
- meta_ref.update(item)
- meta_ref.save(session=session)
-
- return metadata
-
-
-###################
-
-
-@require_context
-def snapshot_create(context, values):
- snapshot_ref = models.Snapshot()
- if not values.get('id'):
- values['id'] = str(utils.gen_uuid())
- snapshot_ref.update(values)
-
- session = get_session()
- with session.begin():
- snapshot_ref.save(session=session)
- return snapshot_ref
-
-
-@require_admin_context
-def snapshot_destroy(context, snapshot_id):
- session = get_session()
- with session.begin():
- session.query(models.Snapshot).\
- filter_by(id=snapshot_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def snapshot_get(context, snapshot_id, session=None):
- result = model_query(context, models.Snapshot, session=session,
- project_only=True).\
- filter_by(id=snapshot_id).\
- first()
-
- if not result:
- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
-
- return result
-
-
-@require_admin_context
-def snapshot_get_all(context):
- return model_query(context, models.Snapshot).all()
-
-
-@require_context
-def snapshot_get_all_for_volume(context, volume_id):
- return model_query(context, models.Snapshot, read_deleted='no',
- project_only=True).\
- filter_by(volume_id=volume_id).all()
-
-
-@require_context
-def snapshot_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
- return model_query(context, models.Snapshot).\
- filter_by(project_id=project_id).\
- all()
-
-
-@require_context
-def snapshot_update(context, snapshot_id, values):
- session = get_session()
- with session.begin():
- snapshot_ref = snapshot_get(context, snapshot_id, session=session)
- snapshot_ref.update(values)
- snapshot_ref.save(session=session)
-
-
###################
@@ -3766,6 +3380,17 @@ def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
all()
+@require_admin_context
+def migration_get_in_progress_by_host(context, host, session=None):
+
+ return model_query(context, models.Migration, session=session).\
+ filter(or_(models.Migration.source_compute == host,
+ models.Migration.dest_compute == host)).\
+ filter(~models.Migration.status.in_(['confirmed', 'reverted'])).\
+ options(joinedload('instance')).\
+ all()
+
+
##################
@@ -3901,7 +3526,7 @@ def instance_type_create(context, values):
def _dict_with_extra_specs(inst_type_query):
- """Takes an instance, volume, or instance type query returned
+ """Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
@@ -4196,15 +3821,6 @@ def instance_system_metadata_get(context, instance_uuid, session=None):
return result
-@require_context
-def instance_system_metadata_delete(context, instance_uuid, key):
- _instance_system_metadata_get_query(context, instance_uuid).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
def _instance_system_metadata_get_item(context, instance_uuid, key,
session=None):
result = _instance_system_metadata_get_query(
@@ -4443,185 +4059,6 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
return specs
-##################
-
-
-@require_admin_context
-def volume_type_create(context, values):
- """Create a new instance type. In order to pass in extra specs,
- the values dict should contain a 'extra_specs' key/value pair:
-
- {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
-
- """
- session = get_session()
- with session.begin():
- try:
- volume_type_get_by_name(context, values['name'], session)
- raise exception.VolumeTypeExists(name=values['name'])
- except exception.VolumeTypeNotFoundByName:
- pass
- try:
- values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
- models.VolumeTypeExtraSpecs)
- volume_type_ref = models.VolumeTypes()
- volume_type_ref.update(values)
- volume_type_ref.save()
- except Exception, e:
- raise exception.DBError(e)
- return volume_type_ref
-
-
-@require_context
-def volume_type_get_all(context, inactive=False, filters=None):
- """
- Returns a dict describing all volume_types with name as key.
- """
- filters = filters or {}
-
- read_deleted = "yes" if inactive else "no"
- rows = model_query(context, models.VolumeTypes,
- read_deleted=read_deleted).\
- options(joinedload('extra_specs')).\
- order_by("name").\
- all()
-
- # TODO(sirp): this patern of converting rows to a result with extra_specs
- # is repeated quite a bit, might be worth creating a method for it
- result = {}
- for row in rows:
- result[row['name']] = _dict_with_extra_specs(row)
-
- return result
-
-
-@require_context
-def volume_type_get(context, id, session=None):
- """Returns a dict describing specific volume_type"""
- result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(id=id).\
- first()
-
- if not result:
- raise exception.VolumeTypeNotFound(volume_type_id=id)
-
- return _dict_with_extra_specs(result)
-
-
-@require_context
-def volume_type_get_by_name(context, name, session=None):
- """Returns a dict describing specific volume_type"""
- result = model_query(context, models.VolumeTypes, session=session).\
- options(joinedload('extra_specs')).\
- filter_by(name=name).\
- first()
-
- if not result:
- raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
- else:
- return _dict_with_extra_specs(result)
-
-
-@require_admin_context
-def volume_type_destroy(context, name):
- session = get_session()
- with session.begin():
- volume_type_ref = volume_type_get_by_name(context, name,
- session=session)
- volume_type_id = volume_type_ref['id']
- session.query(models.VolumeTypes).\
- filter_by(id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
- session.query(models.VolumeTypeExtraSpecs).\
- filter_by(volume_type_id=volume_type_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def volume_get_active_by_window(context, begin, end=None,
- project_id=None):
- """Return volumes that were active during window."""
- session = get_session()
- query = session.query(models.Volume)
-
- query = query.filter(or_(models.Volume.deleted_at == None,
- models.Volume.deleted_at > begin))
- if end:
- query = query.filter(models.Volume.created_at < end)
- if project_id:
- query = query.filter_by(project_id=project_id)
-
- return query.all()
-
-
-####################
-
-
-def _volume_type_extra_specs_query(context, volume_type_id, session=None):
- return model_query(context, models.VolumeTypeExtraSpecs, session=session,
- read_deleted="no").\
- filter_by(volume_type_id=volume_type_id)
-
-
-@require_context
-def volume_type_extra_specs_get(context, volume_type_id):
- rows = _volume_type_extra_specs_query(context, volume_type_id).\
- all()
-
- result = {}
- for row in rows:
- result[row['key']] = row['value']
-
- return result
-
-
-@require_context
-def volume_type_extra_specs_delete(context, volume_type_id, key):
- _volume_type_extra_specs_query(context, volume_type_id).\
- filter_by(key=key).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
-
-
-@require_context
-def volume_type_extra_specs_get_item(context, volume_type_id, key,
- session=None):
- result = _volume_type_extra_specs_query(
- context, volume_type_id, session=session).\
- filter_by(key=key).\
- first()
-
- if not result:
- raise exception.VolumeTypeExtraSpecsNotFound(
- extra_specs_key=key, volume_type_id=volume_type_id)
-
- return result
-
-
-@require_context
-def volume_type_extra_specs_update_or_create(context, volume_type_id,
- specs):
- session = get_session()
- spec_ref = None
- for key, value in specs.iteritems():
- try:
- spec_ref = volume_type_extra_specs_get_item(
- context, volume_type_id, key, session)
- except exception.VolumeTypeExtraSpecsNotFound:
- spec_ref = models.VolumeTypeExtraSpecs()
- spec_ref.update({"key": key, "value": value,
- "volume_type_id": volume_type_id,
- "deleted": False})
- spec_ref.save(session=session)
- return specs
-
-
####################
@@ -4664,211 +4101,6 @@ def s3_image_create(context, image_uuid):
####################
-@require_admin_context
-def sm_backend_conf_create(context, values):
- session = get_session()
- with session.begin():
- config_params = values['config_params']
- backend_conf = model_query(context, models.SMBackendConf,
- session=session,
- read_deleted="yes").\
- filter_by(config_params=config_params).\
- first()
-
- if backend_conf:
- raise exception.Duplicate(_('Backend exists'))
- else:
- backend_conf = models.SMBackendConf()
- backend_conf.update(values)
- backend_conf.save(session=session)
- return backend_conf
-
-
-@require_admin_context
-def sm_backend_conf_update(context, sm_backend_id, values):
- session = get_session()
- with session.begin():
- backend_conf = model_query(context, models.SMBackendConf,
- session=session,
- read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
-
- if not backend_conf:
- raise exception.NotFound(
- _("No backend config with id %(sm_backend_id)s") % locals())
-
- backend_conf.update(values)
- backend_conf.save(session=session)
- return backend_conf
-
-
-@require_admin_context
-def sm_backend_conf_delete(context, sm_backend_id):
- # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
- # `purge` actually deleting the record?
- session = get_session()
- with session.begin():
- model_query(context, models.SMBackendConf, session=session,
- read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- delete()
-
-
-@require_admin_context
-def sm_backend_conf_get(context, sm_backend_id):
- result = model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(id=sm_backend_id).\
- first()
-
- if not result:
- raise exception.NotFound(_("No backend config with id "
- "%(sm_backend_id)s") % locals())
-
- return result
-
-
-@require_admin_context
-def sm_backend_conf_get_by_sr(context, sr_uuid):
- result = model_query(context, models.SMBackendConf, read_deleted="yes").\
- filter_by(sr_uuid=sr_uuid).\
- first()
- if not result:
- raise exception.NotFound(_("No backend config with sr uuid "
- "%(sr_uuid)s") % locals())
- return result
-
-
-@require_admin_context
-def sm_backend_conf_get_all(context):
- return model_query(context, models.SMBackendConf, read_deleted="yes").\
- all()
-
-
-####################
-
-
-def _sm_flavor_get_query(context, sm_flavor_id, session=None):
- return model_query(context, models.SMFlavors, session=session,
- read_deleted="yes").\
- filter_by(id=sm_flavor_id)
-
-
-@require_admin_context
-def sm_flavor_create(context, values):
- session = get_session()
- with session.begin():
- sm_flavor = model_query(context, models.SMFlavors,
- session=session,
- read_deleted="yes").\
- filter_by(label=values['label']).\
- first()
- if not sm_flavor:
- sm_flavor = models.SMFlavors()
- sm_flavor.update(values)
- sm_flavor.save(session=session)
- else:
- raise exception.Duplicate(_('Flavor exists'))
- return sm_flavor
-
-
-@require_admin_context
-def sm_flavor_update(context, sm_flavor_id, values):
- session = get_session()
- with session.begin():
- sm_flavor = model_query(context, models.SMFlavors,
- session=session,
- read_deleted="yes").\
- filter_by(id=sm_flavor_id).\
- first()
- if not sm_flavor:
- raise exception.NotFound(
- _('%(sm_flavor_id) flavor not found') % locals())
- sm_flavor.update(values)
- sm_flavor.save(session=session)
- return sm_flavor
-
-
-@require_admin_context
-def sm_flavor_delete(context, sm_flavor_id):
- session = get_session()
- with session.begin():
- _sm_flavor_get_query(context, sm_flavor_id).delete()
-
-
-@require_admin_context
-def sm_flavor_get(context, sm_flavor_id):
- result = _sm_flavor_get_query(context, sm_flavor_id).first()
-
- if not result:
- raise exception.NotFound(
- _("No sm_flavor called %(sm_flavor_id)s") % locals())
-
- return result
-
-
-@require_admin_context
-def sm_flavor_get_all(context):
- return model_query(context, models.SMFlavors, read_deleted="yes").all()
-
-
-@require_admin_context
-def sm_flavor_get_by_label(context, sm_flavor_label):
- result = model_query(context, models.SMFlavors,
- read_deleted="yes").\
- filter_by(label=sm_flavor_label).first()
- if not result:
- raise exception.NotFound(
- _("No sm_flavor called %(sm_flavor_label)s") % locals())
- return result
-
-
-###############################
-
-
-def _sm_volume_get_query(context, volume_id, session=None):
- return model_query(context, models.SMVolume, session=session,
- read_deleted="yes").\
- filter_by(id=volume_id)
-
-
-def sm_volume_create(context, values):
- sm_volume = models.SMVolume()
- sm_volume.update(values)
- sm_volume.save()
- return sm_volume
-
-
-def sm_volume_update(context, volume_id, values):
- sm_volume = sm_volume_get(context, volume_id)
- sm_volume.update(values)
- sm_volume.save()
- return sm_volume
-
-
-def sm_volume_delete(context, volume_id):
- session = get_session()
- with session.begin():
- _sm_volume_get_query(context, volume_id, session=session).delete()
-
-
-def sm_volume_get(context, volume_id):
- result = _sm_volume_get_query(context, volume_id).first()
-
- if not result:
- raise exception.NotFound(
- _("No sm_volume with id %(volume_id)s") % locals())
-
- return result
-
-
-def sm_volume_get_all(context):
- return model_query(context, models.SMVolume, read_deleted="yes").all()
-
-
-################
-
-
def _aggregate_get_query(context, model_class, id_field, id,
session=None, read_deleted=None):
return model_query(context, model_class, session=session,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py b/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
deleted file mode 100644
index d08afd16e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import MetaData, Integer, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # New table
- quota_classes = Table('quota_classes', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('class_name',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('hard_limit', Integer(), nullable=True),
- )
-
- try:
- quota_classes.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_classes))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_classes = Table('quota_classes', meta, autoload=True)
- try:
- quota_classes.drop()
- except Exception:
- LOG.error(_("quota_classes table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py b/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py
deleted file mode 100644
index d9308121d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- """Map quotas hard_limit from NULL to -1"""
- _migrate_unlimited(migrate_engine, None, -1)
-
-
-def downgrade(migrate_engine):
- """Map quotas hard_limit from -1 to NULL"""
- _migrate_unlimited(migrate_engine, -1, None)
-
-
-def _migrate_unlimited(migrate_engine, old_limit, new_limit):
- meta = sqlalchemy.MetaData()
- meta.bind = migrate_engine
-
- def _migrate(table_name):
- table = sqlalchemy.Table(table_name, meta, autoload=True)
- table.update().\
- where(table.c.hard_limit == old_limit).\
- values(hard_limit=new_limit).execute()
-
- _migrate('quotas')
- _migrate('quota_classes')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py b/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
deleted file mode 100644
index da985b956..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
-
- tables = ["agent_builds", "aggregate_hosts", "aggregate_metadata",
- "aggregates", "block_device_mapping", "bw_usage_cache",
- "dns_domains", "instance_faults", "instance_type_extra_specs",
- "provider_fw_rules", "quota_classes", "s3_images",
- "sm_backend_config", "sm_flavors", "sm_volume",
- "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs", "volume_types"]
-
- meta = MetaData()
- meta.bind = migrate_engine
- if migrate_engine.name == "mysql":
- d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';")
- for row in d.fetchall():
- table_name = row[0]
- if table_name in tables:
- migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" %
- table_name)
-
-
-def downgrade(migrate_engine):
- pass
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
deleted file mode 100644
index ce07905c8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, BigInteger
-from sqlalchemy import MetaData, Integer, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # add column:
- bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
- uuid = Column('uuid', String(36))
-
- # clear the cache to get rid of entries with no uuid
- migrate_engine.execute(bw_usage_cache.delete())
-
- bw_usage_cache.create_column(uuid)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # drop column:
- bw_usage_cache = Table('bw_usage_cache', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('mac', String(255)),
- Column('uuid', String(36)),
- Column('start_period', DateTime(timezone=False), nullable=False),
- Column('last_refreshed', DateTime(timezone=False)),
- Column('bw_in', BigInteger()),
- Column('bw_out', BigInteger()),
- extend_existing=True)
-
- bw_usage_cache.drop_column('uuid')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
deleted file mode 100644
index 73d8b6968..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == block_device_mapping.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(block_device_mapping.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[block_device_mapping.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- block_device_mapping.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(block_device_mapping)
-
- try:
- block_device_mapping.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == block_device_mapping.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- block_device_mapping.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql
deleted file mode 100644
index 3699ce9ab..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql
+++ /dev/null
@@ -1,97 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- NULL,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE block_device_mapping_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql
deleted file mode 100644
index d75d2ffa2..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql
+++ /dev/null
@@ -1,97 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- INSERT INTO block_device_mapping_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- NULL
- FROM block_device_mapping;
-
- UPDATE block_device_mapping_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE block_device_mapping_backup.instance_id = instances.id
- );
-
- DROP TABLE block_device_mapping;
-
- CREATE TABLE block_device_mapping (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- device_name VARCHAR(255) NOT NULL,
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
- CHECK (deleted IN (0, 1)),
- CHECK (delete_on_termination IN (0, 1)),
- CHECK (no_device IN (0, 1)),
- FOREIGN KEY(volume_id) REFERENCES volumes (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- INSERT INTO block_device_mapping
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info,
- instance_uuid
- FROM block_device_mapping_backup;
-
- DROP TABLE block_device_mapping_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py b/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py
deleted file mode 100644
index d878e250b..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-from nova import utils
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Build mapping tables for our volume uuid migration.
-
- These mapping tables serve two purposes:
- 1. Provide a method for downgrade after UUID conversion
- 2. Provide a uuid to associate with existing volumes and snapshots
- when we do the actual datatype migration from int to uuid
-
- """
- meta = MetaData()
- meta.bind = migrate_engine
-
- volume_id_mappings = Table('volume_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- volume_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'volume_id_mappings'")
- meta.drop_all(tables=[volume_id_mappings])
- raise
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta,
- Column('created_at',
- DateTime(timezone=False)),
- Column('updated_at',
- DateTime(timezone=False)),
- Column('deleted_at',
- DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36),
- nullable=False))
- try:
- snapshot_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'snapshot_id_mappings'")
- meta.drop_all(tables=[snapshot_id_mappings])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB")
- migrate_engine.execute("ALTER TABLE snapshot_id_mappings "
- "Engine=InnoDB")
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- old_id = v['id']
- new_id = utils.gen_uuid()
- row = volume_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- old_id = s['id']
- new_id = utils.gen_uuid()
- row = snapshot_id_mappings.insert()
- row.execute({'id': old_id,
- 'uuid': str(new_id)})
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- volume_id_mappings = Table('volume_id_mappings', meta, autoload=True)
- volume_id_mappings.drop()
-
- snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True)
- snapshot_id_mappings.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py b/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
deleted file mode 100644
index 4be63b322..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Integer
-from sqlalchemy import MetaData, String, Table
-
-from migrate import ForeignKeyConstraint
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(String(36), primary_key=True)
- volumes.c.snapshot_id.alter(String(36))
- volume_metadata.c.volume_id.alter(String(36), nullable=False)
- snapshots.c.id.alter(String(36), primary_key=True)
- snapshots.c.volume_id.alter(String(36))
- sm_volume.c.id.alter(String(36))
- block_device_mapping.c.volume_id.alter(String(36))
- block_device_mapping.c.snapshot_id.alter(String(36))
- iscsi_targets.c.volume_id.alter(String(36), nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
- # NOTE(jdg) We're intentionally leaving off FK's on BDM
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
- dialect = migrate_engine.url.get_dialect().name
-
- if dialect.startswith('sqlite'):
- return
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- sm_volume = Table('sm_volume', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
-
- volumes.c.id.alter(Integer, primary_key=True, autoincrement=True)
- volumes.c.snapshot_id.alter(Integer)
- volume_metadata.c.volume_id.alter(Integer, nullable=False)
- snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True)
- snapshots.c.volume_id.alter(Integer)
- sm_volume.c.id.alter(Integer)
- block_device_mapping.c.volume_id.alter(Integer)
- block_device_mapping.c.snapshot_id.alter(Integer)
- iscsi_targets.c.volume_id.alter(Integer, nullable=True)
-
- try:
- fkeys = list(snapshots.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[snapshots.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(volume_metadata.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- fkeys = list(sm_volume.c.id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[sm_volume.c.id],
- refcolumns=[volumes.c.id],
- name=fkey_name).create()
-
- # NOTE(jdg) Put the BDM foreign keys back in place
- fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
- refcolumns=[volumes.c.id],
- name=fkey_name).drop()
-
- fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
- refcolumns=[snapshots.c.id],
- name=fkey_name).drop()
-
- except Exception:
- LOG.error(_("Foreign Key constraint couldn't be removed"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
deleted file mode 100644
index 7d89da247..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
+++ /dev/null
@@ -1,226 +0,0 @@
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(255),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id INTEGER,
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id INTEGER,
- volume_id INTEGER,
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change volume_id and sm_volume_table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql
deleted file mode 100644
index 53fbc69f6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql
+++ /dev/null
@@ -1,226 +0,0 @@
-BEGIN TRANSACTION;
-
- -- change id and snapshot_id datatypes in volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
- DROP TABLE volumes;
- ALTER TABLE volumes_backup RENAME TO volumes;
-
- -- change id and volume_id datatypes in snapshots table
- CREATE TABLE snapshots_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- volume_id VARCHAR(36),
- status VARCHAR(255),
- progress VARCHAR(255),
- volume_size INTEGER,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO snapshots_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- user_id,
- project_id,
- volume_id,
- status,
- progress,
- volume_size,
- display_name,
- display_description
- FROM snapshots;
- DROP TABLE snapshots;
- ALTER TABLE snapshots_backup RENAME TO snapshots;
-
- -- change id and volume_id datatypes in iscsi_targets table
- CREATE TABLE iscsi_targets_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- target_num INTEGER,
- host VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO iscsi_targets_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- target_num,
- host,
- volume_id
- FROM iscsi_targets;
- DROP TABLE iscsi_targets;
- ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
-
- CREATE TABLE volume_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- key VARCHAR(255),
- value VARCHAR(255),
- volume_id VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO volume_metadata_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- key,
- value,
- volume_id
- FROM volume_metadata;
- DROP TABLE volume_metadata;
- ALTER TABLE volume_metadata_backup RENAME TO volume_metadata;
-
- -- change volume_id and snapshot_id datatypes in bdm table
- CREATE TABLE block_device_mapping_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- device_name VARCHAR(255),
- delete_on_termination BOOLEAN,
- virtual_name VARCHAR(255),
- snapshot_id VARCHAR(36),
- volume_id VARCHAR(36),
- volume_size INTEGER,
- no_device BOOLEAN,
- connection_info VARCHAR(255),
- FOREIGN KEY(instance_uuid) REFERENCES instances(id),
- FOREIGN KEY(volume_id) REFERENCES volumes(id),
- FOREIGN KEY(snapshot_id) REFERENCES snapshots(id),
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
- INSERT INTO block_device_mapping_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- device_name,
- delete_on_termination,
- virtual_name,
- snapshot_id,
- volume_id,
- volume_size,
- no_device,
- connection_info
- FROM block_device_mapping;
- DROP TABLE block_device_mapping;
- ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping;
-
- -- change volume_id and sm_volume_table
- CREATE TABLE sm_volume_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- backend_id INTEGER NOT NULL,
- vdi_uuid VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(id) REFERENCES volumes(id),
- UNIQUE (id),
- CHECK (deleted IN (0,1))
- );
- INSERT INTO sm_volume_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- backend_id,
- vdi_uuid
- FROM sm_volume;
- DROP TABLE sm_volume;
- ALTER TABLE sm_volume_backup RENAME TO sm_volume;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py
deleted file mode 100644
index dadf15d30..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from migrate import ForeignKeyConstraint, NotSupportedError
-from sqlalchemy import MetaData, select, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- """Convert volume and snapshot id columns from int to varchar."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- fkey_columns = [
- iscsi_targets.c.volume_id,
- volume_metadata.c.volume_id,
- sm_volumes.c.id,
- ]
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- LOG.info('Dropping foreign key %s' % fkey_name)
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id],
- name=fkey_name)
- try:
- fkey.drop()
- except NotSupportedError:
- # NOTE(sirp): sqlite doesn't support ALTER TABLE DROP
- # CONSTRAINT and sqlalchemy-migrate doesn't yet have a
- # work-around using temp tables.
- pass
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.uuid],
- volume_mappings.c.id == v['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.uuid],
- snapshot_mappings.c.id == s['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id])
- fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
-
-
-def downgrade(migrate_engine):
- """Convert volume and snapshot id columns back to int."""
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- snapshots = Table('snapshots', meta, autoload=True)
- iscsi_targets = Table('iscsi_targets', meta, autoload=True)
- volume_metadata = Table('volume_metadata', meta, autoload=True)
- block_device_mapping = Table('block_device_mapping', meta, autoload=True)
- sm_volumes = Table('sm_volume', meta, autoload=True)
-
- volume_mappings = Table('volume_id_mappings', meta, autoload=True)
- snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)
-
- fkey_columns = [
- iscsi_targets.c.volume_id,
- volume_metadata.c.volume_id,
- sm_volumes.c.id,
- ]
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey_name = fkeys[0].constraint.name
- LOG.info('Dropping foreign key %s' % fkey_name)
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id],
- name=fkey_name)
- try:
- fkey.drop()
- except NotSupportedError:
- # NOTE(sirp): sqlite doesn't support ALTER TABLE DROP
- # CONSTRAINT and sqlalchemy-migrate doesn't yet have a
- # work-around using temp tables.
- pass
-
- volume_list = list(volumes.select().execute())
- for v in volume_list:
- new_id = select([volume_mappings.c.id],
- volume_mappings.c.uuid == v['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- sm_volumes.update().\
- where(sm_volumes.c.id == v['id']).\
- values(id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- iscsi_targets.update().\
- where(iscsi_targets.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- volume_metadata.update().\
- where(volume_metadata.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.volume_id == v['id']).\
- values(volume_id=new_id).execute()
-
- snapshot_list = list(snapshots.select().execute())
- for s in snapshot_list:
- new_id = select([snapshot_mappings.c.id],
- snapshot_mappings.c.uuid == s['id']).execute().fetchone()[0]
-
- volumes.update().\
- where(volumes.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- snapshots.update().\
- where(snapshots.c.id == s['id']).\
- values(id=new_id).execute()
-
- block_device_mapping.update().\
- where(block_device_mapping.c.snapshot_id == s['id']).\
- values(snapshot_id=new_id).execute()
-
- for column in fkey_columns:
- fkeys = list(column.foreign_keys)
- if fkeys:
- fkey = ForeignKeyConstraint(columns=[column],
- refcolumns=[volumes.c.id])
- fkey.create()
- LOG.info('Created foreign key %s' % fkey_name)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py
deleted file mode 100644
index 85856ed6a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- instance_system_metadata = Table('instance_system_metadata', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_uuid',
- String(36),
- ForeignKey('instances.uuid'),
- nullable=False),
- Column('key',
- String(length=255, convert_unicode=True,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False),
- nullable=False),
- Column('value',
- String(length=255, convert_unicode=True,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- mysql_engine='InnoDB')
-
- try:
- instance_system_metadata.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(instance_system_metadata))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- instances = Table('instances', meta, autoload=True)
-
- instance_system_metadata = Table(
- 'instance_system_metadata', meta, autoload=True)
- instance_system_metadata.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py b/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py
deleted file mode 100644
index 0200861b2..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/093_drop_instance_actions_table.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import Integer, MetaData, String, Table, Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_actions = Table('instance_actions', meta, autoload=True)
- instance_actions.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True,
- autoload_with=migrate_engine)
-
- instance_actions = Table('instance_actions', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False),
- Column('instance_id',
- Integer(),
- ForeignKey('instances.id')),
- Column('action',
- String(length=255, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- Column('error',
- Text(length=None, convert_unicode=False,
- assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False)),
- )
- instance_actions.create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py b/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py
deleted file mode 100644
index 5b1d9b490..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/094_update_postgresql_sequence_names.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(dprince): Need to rename the leftover zones stuff and quota_new
- # stuff from Essex for PostgreSQL.
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE zones_id_seq RENAME TO cells_id_seq;
- ALTER TABLE ONLY cells DROP CONSTRAINT zones_pkey;
- ALTER TABLE ONLY cells ADD CONSTRAINT cells_pkey
- PRIMARY KEY (id);
-
- ALTER TABLE quotas_new_id_seq RENAME TO quotas_id_seq;
- ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_new_pkey;
- ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_pkey
- PRIMARY KEY (id);"""
- migrate_engine.execute(sql)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE cells_id_seq RENAME TO zones_id_seq;
- ALTER TABLE ONLY cells DROP CONSTRAINT cells_pkey;
- ALTER TABLE ONLY cells ADD CONSTRAINT zones_pkey
- PRIMARY KEY (id);
-
- ALTER TABLE quotas_id_seq RENAME TO quotas_new_id_seq;
- ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_pkey;
- ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_new_pkey
- PRIMARY KEY (id);"""
- migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py
deleted file mode 100644
index 08501177d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# Copyright 2012 SolidFire Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Integer, String, Table
-from sqlalchemy import select, Column
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_uuid_column = Column('instance_uuid', String(36))
-
- instance_uuid_column.create(volumes)
- try:
- volumes.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == volumes.c.instance_id)
- ).execute()
- except Exception:
- instance_uuid_column.drop()
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).drop()
-
- except Exception:
- LOG.error(_("foreign key could not be dropped"))
- raise
-
- volumes.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instances = Table('instances', meta, autoload=True)
- volumes = Table('volumes', meta, autoload=True)
- instance_id_column = Column('instance_id', Integer)
-
- instance_id_column.create(volumes)
- try:
- volumes.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == volumes.c.instance_uuid)
- ).execute()
- except Exception:
- instance_id_column.drop()
-
- fkeys = list(volumes.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fk_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[volumes.c.instance_id],
- refcolumns=[instances.c.id],
- name=fk_name).create()
-
- except Exception:
- LOG.error(_("foreign key could not be created"))
- raise
-
- volumes.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql
deleted file mode 100644
index 7c13455e4..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_downgrade.sql
+++ /dev/null
@@ -1,133 +0,0 @@
-BEGIN TRANSACTION;
- -- change instance_id volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- NULL,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_id =
- (SELECT id
- FROM instances
- WHERE volumes_backup.instance_uuid = instances.uuid
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY (instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql
deleted file mode 100644
index 130e11030..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/095_sqlite_upgrade.sql
+++ /dev/null
@@ -1,132 +0,0 @@
-BEGIN TRANSACTION;
- -- change instance_id volumes table
- CREATE TABLE volumes_backup(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes_backup SELECT
- created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_id,
- NULL,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes;
-
- UPDATE volumes_backup
- SET instance_uuid =
- (SELECT uuid
- FROM instances
- WHERE volumes_backup.instance_id = instances.id
- );
- DROP TABLE volumes;
-
- CREATE TABLE volumes(
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id VARCHAR(36) NOT NULL,
- ec2_id INTEGER,
- user_id VARCHAR(255),
- project_id VARCHAR(255),
- snapshot_id VARCHAR(36),
- host VARCHAR(255),
- size INTEGER,
- availability_zone VARCHAR(255),
- instance_uuid VARCHAR(36),
- mountpoint VARCHAR(255),
- attach_time VARCHAR(255),
- status VARCHAR(255),
- attach_status VARCHAR(255),
- scheduled_at DATETIME,
- launched_at DATETIME,
- terminated_at DATETIME,
- display_name VARCHAR(255),
- display_description VARCHAR(255),
- provider_location VARCHAR(255),
- provider_auth VARCHAR(255),
- volume_type_id INTEGER,
- PRIMARY KEY (id),
- UNIQUE (id),
- CHECK (deleted IN (0, 1))
- );
-
- INSERT INTO volumes
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- ec2_id,
- user_id,
- project_id,
- snapshot_id,
- host,
- size,
- availability_zone,
- instance_uuid,
- mountpoint,
- attach_time,
- status,
- attach_status,
- scheduled_at,
- launched_at,
- terminated_at,
- display_name,
- display_description,
- provider_location,
- provider_auth,
- volume_type_id
- FROM volumes_backup;
- DROP TABLE volumes_backup;
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py b/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py
deleted file mode 100644
index 0e51c644f..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/096_recreate_dns_domains.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Boolean, Column, DateTime, ForeignKey
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(dprince): The old dns_domains table is in the 'latin1'
- # charset and had its primary key length set to 512.
- # This is too long to be a valid pkey in the 'utf8' table charset
- # and is the root cause of errors like:
- #
- # 1) Dumping a database with mysqldump and trying to import it fails
- # because this table is latin1 but fkeys to utf8 tables (projects).
- #
- # 2) Trying to alter the old dns_domains table fails with errors like:
- # mysql> ALTER TABLE dns_domains DROP PRIMARY KEY;
- # ERROR 1025 (HY000): Error on rename of './nova/#sql-6cf_855'....
- #
- # In short this table is just in a bad state. So... lets create a new one
- # with a shorter 'domain' column which is valid for the utf8 charset.
- # https://bugs.launchpad.net/nova/+bug/993663
-
- #rename old table
- dns_domains_old = Table('dns_domains', meta, autoload=True)
- dns_domains_old.rename(name='dns_domains_old')
-
- # NOTE(dprince): manually remove pkey/fkey for postgres
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_pkey;
- ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_project_id_fkey;"""
- migrate_engine.execute(sql)
-
- #Bind new metadata to avoid issues after the rename
- meta = MetaData()
- meta.bind = migrate_engine
- projects = Table('projects', meta, autoload=True) # Required for fkey
-
- dns_domains_new = Table('dns_domains', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('domain', String(length=255), nullable=False, primary_key=True),
- Column('scope', String(length=255)),
- Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255), ForeignKey('projects.id')),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- dns_domains_new.create()
-
- dns_domains_old = Table('dns_domains_old', meta, autoload=True)
- record_list = list(dns_domains_old.select().execute())
- for rec in record_list:
- row = dns_domains_new.insert()
- row.execute({'created_at': rec['created_at'],
- 'updated_at': rec['updated_at'],
- 'deleted_at': rec['deleted_at'],
- 'deleted': rec['deleted'],
- 'domain': rec['domain'],
- 'scope': rec['scope'],
- 'availability_zone': rec['availability_zone'],
- 'project_id': rec['project_id'],
- })
-
- dns_domains_old.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- dns_domains_old = Table('dns_domains', meta, autoload=True)
- dns_domains_old.rename(name='dns_domains_old')
-
- # NOTE(dprince): manually remove pkey/fkey for postgres
- if migrate_engine.name == "postgresql":
- sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_pkey;
- ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
- dns_domains_project_id_fkey;"""
- migrate_engine.execute(sql)
-
- #Bind new metadata to avoid issues after the rename
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains_new = Table('dns_domains', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('domain', String(length=512), primary_key=True, nullable=False),
- Column('scope', String(length=255)),
- Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
- dns_domains_new.create()
-
- dns_domains_old = Table('dns_domains_old', meta, autoload=True)
- record_list = list(dns_domains_old.select().execute())
- for rec in record_list:
- row = dns_domains_new.insert()
- row.execute({'created_at': rec['created_at'],
- 'updated_at': rec['updated_at'],
- 'deleted_at': rec['deleted_at'],
- 'deleted': rec['deleted'],
- 'domain': rec['domain'],
- 'scope': rec['scope'],
- 'availability_zone': rec['availability_zone'],
- 'project_id': rec['project_id'],
- })
-
- dns_domains_old.drop()
-
- # NOTE(dprince): We can't easily add the MySQL Fkey on the downgrade
- # because projects is 'utf8' where dns_domains is 'latin1'.
- if migrate_engine.name != "mysql":
- projects = Table('projects', meta, autoload=True)
- fkey = ForeignKeyConstraint(columns=[dns_domains_new.c.project_id],
- refcolumns=[projects.c.id])
- fkey.create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py b/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py
deleted file mode 100644
index 82d66938c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime
-from sqlalchemy import MetaData, Integer, String, Table, ForeignKey
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # New tables
- quota_usages = Table('quota_usages', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('in_use', Integer(), nullable=False),
- Column('reserved', Integer(), nullable=False),
- Column('until_refresh', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- quota_usages.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(quota_usages))
- raise
-
- reservations = Table('reservations', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True),
- Column('uuid',
- String(length=36, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), nullable=False),
- Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
- nullable=False),
- Column('project_id',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- index=True),
- Column('resource',
- String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('delta', Integer(), nullable=False),
- Column('expire', DateTime(timezone=False)),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- reservations.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(reservations))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- try:
- quota_usages.drop()
- except Exception:
- LOG.error(_("quota_usages table not dropped"))
- raise
-
- reservations = Table('reservations', meta, autoload=True)
- try:
- reservations.drop()
- except Exception:
- LOG.error(_("reservations table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py b/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py
deleted file mode 100644
index 680b27df7..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/098_update_volume_attach_time.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright (c) 2012 Canonical Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import select, Column, Table, MetaData, String, DateTime
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- attach_datetime = Column('attachtime_datetime', DateTime(timezone=False))
- attach_datetime.create(volumes)
-
- old_attachtime = volumes.c.attach_time
-
- try:
- volumes_list = list(volumes.select().execute())
- for v in volumes_list:
- attach_time = select([volumes.c.attach_time],
- volumes.c.id == v['id']).execute().fetchone()[0]
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(attachtime_datetime=attach_time).execute()
- except Exception:
- attach_datetime.drop()
- raise
-
- old_attachtime.alter(name='attach_time_old')
- attach_datetime.alter(name='attach_time')
- old_attachtime.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- volumes = Table('volumes', meta, autoload=True)
- attach_string = Column('attachtime_string', String(255))
- attach_string.create(volumes)
-
- old_attachtime = volumes.c.attach_time
-
- try:
- volumes_list = list(volumes.select().execute())
- for v in volumes_list:
- attach_time = select([volumes.c.attach_time],
- volumes.c.id == v['id']).execute().fetchone()[0]
- volumes.update().\
- where(volumes.c.id == v['id']).\
- values(attachtime_string=attach_time).execute()
- except Exception:
- attach_string.drop()
- raise
-
- old_attachtime.alter(name='attach_time_old')
- attach_string.alter(name='attach_time')
- old_attachtime.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
deleted file mode 100644
index e5c2a275d..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_metadata = Table('instance_metadata', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(instance_metadata)
-
- try:
- instance_metadata.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == instance_metadata.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(instance_metadata.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_metadata.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_metadata.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instance_metadata = Table('instance_metadata', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(instance_metadata)
-
- try:
- instance_metadata.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == instance_metadata.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- instance_metadata.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
deleted file mode 100644
index 97b628c6e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_downgrade.sql
+++ /dev/null
@@ -1,64 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_metadata_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- NULL,
- instance_uuid,
- key,
- value
- FROM instance_metadata;
-
- UPDATE instance_metadata_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE instance_metadata_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE instance_metadata;
-
- CREATE TABLE instance_metadata (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX instance_metadata_instance_id_idx ON instance_metadata(instance_id);
-
- INSERT INTO instance_metadata
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- key,
- value
- FROM instance_metadata_backup;
-
- DROP TABLE instance_metadata_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
deleted file mode 100644
index 0d1e1ca8b..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/100_sqlite_upgrade.sql
+++ /dev/null
@@ -1,64 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_metadata_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_metadata_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_id,
- NULL,
- key,
- value
- FROM instance_metadata;
-
- UPDATE instance_metadata_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE instance_metadata_backup.instance_id = instances.id
- );
-
- DROP TABLE instance_metadata;
-
- CREATE TABLE instance_metadata (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_uuid VARCHAR(36) NOT NULL,
- key VARCHAR(255) NOT NULL,
- value VARCHAR(255) NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX instance_metadata_instance_uuid_idx ON instance_metadata(instance_uuid);
-
- INSERT INTO instance_metadata
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_uuid,
- key,
- value
- FROM instance_metadata_backup;
-
- DROP TABLE instance_metadata_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
deleted file mode 100644
index 26b53bb7e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- sgia = Table('security_group_instance_association', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(sgia)
-
- try:
- sgia.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == sgia.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(sgia.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[sgia.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- sgia.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- sgia = Table('security_group_instance_association', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(sgia)
-
- try:
- sgia.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == sgia.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- sgia.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
deleted file mode 100644
index 08aaa241c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_downgrade.sql
+++ /dev/null
@@ -1,61 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE security_group_instance_association_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO security_group_instance_association_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- NULL,
- instance_uuid
- FROM security_group_instance_association;
-
- UPDATE security_group_instance_association_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE security_group_instance_association_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE security_group_instance_association;
-
- CREATE TABLE security_group_instance_association (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
- CREATE INDEX security_group_instance_association_instance_id_idx ON security_group_instance_association(instance_id);
-
- INSERT INTO security_group_instance_association
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_id
- FROM security_group_instance_association_backup;
-
- DROP TABLE security_group_instance_association_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
deleted file mode 100644
index d66c5ce37..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/101_sqlite_upgrade.sql
+++ /dev/null
@@ -1,61 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE security_group_instance_association_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO security_group_instance_association_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_id,
- NULL
- FROM security_group_instance_association;
-
- UPDATE security_group_instance_association_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE security_group_instance_association_backup.instance_id = instances.id
- );
-
- DROP TABLE security_group_instance_association;
-
- CREATE TABLE security_group_instance_association (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- security_group_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX security_group_instance_association_security_group_id_idx ON security_group_instance_association(security_group_id);
- CREATE INDEX security_group_instance_association_instance_uuid_idx ON security_group_instance_association(instance_uuid);
-
- INSERT INTO security_group_instance_association
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- security_group_id,
- instance_uuid
- FROM security_group_instance_association_backup;
-
- DROP TABLE security_group_instance_association_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py
deleted file mode 100644
index 1cfa523c6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- consoles = Table('consoles', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(consoles)
-
- try:
- consoles.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == consoles.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(consoles.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[consoles.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- consoles.c.instance_id.drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- consoles = Table('consoles', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(consoles)
-
- try:
- consoles.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == consoles.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- consoles.c.instance_uuid.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql
deleted file mode 100644
index 50f260549..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_downgrade.sql
+++ /dev/null
@@ -1,72 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE consoles_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id)
- );
-
- INSERT INTO consoles_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- NULL,
- instance_uuid,
- password,
- port,
- pool_id
- FROM consoles;
-
- UPDATE consoles_backup
- SET instance_uuid=
- (SELECT id
- FROM instances
- WHERE consoles_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE consoles;
-
- CREATE TABLE consoles (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX consoles_pool_id ON consoles(pool_id);
-
- INSERT INTO consoles
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_id,
- password,
- port,
- pool_id
- FROM consoles_backup;
-
- DROP TABLE consoles_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql
deleted file mode 100644
index ef48162bc..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/102_sqlite_upgrade.sql
+++ /dev/null
@@ -1,72 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE consoles_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id)
- );
-
- INSERT INTO consoles_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_id,
- NULL,
- password,
- port,
- pool_id
- FROM consoles;
-
- UPDATE consoles_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE consoles_backup.instance_id = instances.id
- );
-
- DROP TABLE consoles;
-
- CREATE TABLE consoles (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- instance_name VARCHAR(255),
- instance_uuid VARCHAR(36),
- password VARCHAR(255),
- port INTEGER,
- pool_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX consoles_pool_id ON consoles(pool_id);
-
- INSERT INTO consoles
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- instance_name,
- instance_uuid,
- password,
- port,
- pool_id
- FROM consoles_backup;
-
- DROP TABLE consoles_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py b/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py
deleted file mode 100644
index 0eeac2587..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/103_instance_indexes.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['block_device_mapping',
- 'consoles',
- 'volumes']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['block_device_mapping',
- 'consoles',
- 'volumes']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py b/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py
deleted file mode 100644
index 4bf6b0484..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_instance_indexes_2.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # NOTE(mikal): these weren't done in 103 because sqlite already has the
- # index.
- for table in ['instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.create(migrate_engine)
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- for table in ['instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
- i = Index('%s_instance_uuid_idx' % table, t.c.instance_uuid)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_downgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/104_sqlite_upgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
deleted file mode 100644
index c4c13e539..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
-
- # We need to remove the foreign key constraint or the column rename will
- # fail
- fkeys = list(instance_info_caches.c.instance_id.foreign_keys)
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_info_caches.c.instance_id],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_info_caches.c.instance_id.alter(name='instance_uuid')
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
- instance_info_caches = Table('instance_info_caches', meta, autoload=True)
-
- # We need to remove the foreign key constraint or the column rename will
- # fail
- fkeys = list(instance_info_caches.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[instance_info_caches.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- instance_info_caches.c.instance_uuid.alter(name='instance_id')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql
deleted file mode 100644
index 563b1245a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_downgrade.sql
+++ /dev/null
@@ -1,50 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_info_caches_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_id VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_info_caches_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_uuid as instance_id
- FROM instance_info_caches;
-
- DROP TABLE instance_info_caches;
-
- CREATE TABLE instance_info_caches (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_id VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- CREATE INDEX instance_info_caches_instance_id_idx ON instance_info_caches(instance_id);
-
- INSERT INTO instance_info_caches
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_id
- FROM instance_info_caches_backup;
-
- DROP TABLE instance_info_caches_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql
deleted file mode 100644
index 4e675749e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/105_sqlite_upgrade.sql
+++ /dev/null
@@ -1,50 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE instance_info_caches_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO instance_info_caches_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_id as instance_uuid
- FROM instance_info_caches;
-
- DROP TABLE instance_info_caches;
-
- CREATE TABLE instance_info_caches (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- network_info TEXT,
- instance_uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- CREATE INDEX instance_info_caches_instance_uuid_idx ON instance_info_caches(instance_uuid);
-
- INSERT INTO instance_info_caches
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- network_info,
- instance_uuid
- FROM instance_info_caches_backup;
-
- DROP TABLE instance_info_caches_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py b/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
deleted file mode 100644
index 2c483007c..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-from migrate import ForeignKeyConstraint
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- for table in ['block_device_mapping',
- 'consoles',
- 'instance_info_caches',
- 'instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
-
- try:
- ForeignKeyConstraint(
- columns=[t.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- for table in ['block_device_mapping',
- 'consoles',
- 'instance_info_caches',
- 'instance_metadata',
- 'security_group_instance_association']:
- t = Table(table, meta, autoload=True)
-
- try:
- ForeignKeyConstraint(
- columns=[t.c.instance_uuid],
- refcolumns=[instances.c.uuid]).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_downgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql
deleted file mode 100644
index 8d115abb8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/106_sqlite_upgrade.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 'noop'; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py b/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
deleted file mode 100644
index 250906c62..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/107_add_instance_id_mappings.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 SINA Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # create new table
- instance_id_mappings = Table('instance_id_mappings', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('uuid', String(36), index=True, nullable=False))
- try:
- instance_id_mappings.create()
- except Exception:
- LOG.exception("Exception while creating table 'instance_id_mappings'")
- meta.drop_all(tables=[instance_id_mappings])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE instance_id_mappings "
- "Engine=InnoDB")
-
- instances = Table('instances', meta, autoload=True)
- instance_id_mappings = Table('instance_id_mappings', meta, autoload=True)
-
- instance_list = list(instances.select().execute())
- for instance in instance_list:
- instance_id = instance['id']
- uuid = instance['uuid']
- row = instance_id_mappings.insert()
- row.execute({'id': instance_id, 'uuid': uuid})
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_id_mappings = Table('instance_id_mappings', meta, autoload=True)
- instance_id_mappings.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py b/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
deleted file mode 100644
index d8593bd77..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/108_task_log.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 SINA Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # create new table
- task_log = Table('task_log', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted',
- Boolean(create_constraint=True, name=None)),
- Column('id', Integer(),
- primary_key=True,
- nullable=False,
- autoincrement=True),
- Column('task_name', String(255), nullable=False),
- Column('state', String(255), nullable=False),
- Column('host', String(255), index=True, nullable=False),
- Column('period_beginning', String(255),
- index=True, nullable=False),
- Column('period_ending', String(255), index=True, nullable=False),
- Column('message', String(255), nullable=False),
- Column('task_items', Integer()),
- Column('errors', Integer()),
- )
- try:
- task_log.create()
- except Exception:
- meta.drop_all(tables=[task_log])
- raise
-
- if migrate_engine.name == "mysql":
- migrate_engine.execute("ALTER TABLE task_log "
- "Engine=InnoDB")
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- task_log = Table('task_log', meta, autoload=True)
- task_log.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py b/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
deleted file mode 100644
index a2b0792d3..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains = Table('dns_domains', meta, autoload=True)
- projects = Table('projects', meta, autoload=True)
-
- fkeys = list(dns_domains.c.project_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[dns_domains.c.project_id],
- refcolumns=[projects.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dns_domains = Table('dns_domains', meta, autoload=True)
- projects = Table('projects', meta, autoload=True)
-
- kwargs = {
- 'columns': [dns_domains.c.project_id],
- 'refcolumns': [projects.c.id],
- }
-
- if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
- kwargs['name'] = 'dns_domains_ibfk_1'
-
- ForeignKeyConstraint(**kwargs).create()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql
deleted file mode 100644
index ffb4d132e..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_downgrade.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE dns_domains_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains;
-
- DROP TABLE dns_domains;
-
- CREATE TABLE dns_domains (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain),
- FOREIGN KEY (project_id) REFERENCES projects (id)
- );
-
- INSERT INTO dns_domains
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains_backup;
-
- DROP TABLE dns_domains_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql
deleted file mode 100644
index eeb481658..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/109_sqlite_upgrade.sql
+++ /dev/null
@@ -1,52 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE dns_domains_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains;
-
- DROP TABLE dns_domains;
-
- CREATE TABLE dns_domains (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- domain VARCHAR(512) NOT NULL,
- scope VARCHAR(255),
- availability_zone VARCHAR(255),
- project_id VARCHAR(255),
- PRIMARY KEY (domain)
- );
-
- INSERT INTO dns_domains
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- domain,
- scope,
- availability_zone,
- project_id
- FROM dns_domains_backup;
-
- DROP TABLE dns_domains_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py b/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py
deleted file mode 100644
index 734a3729f..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/110_drop_deprecated_auth.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import (Boolean, Column, DateTime, ForeignKey,
- Index, MetaData, String, Table)
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- tables = (
- "user_project_role_association",
- "user_project_association",
- "user_role_association",
- "projects",
- "users",
- "auth_tokens",
- )
- for table_name in tables:
- Table(table_name, meta, autoload=True).drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- auth_tokens = Table('auth_tokens', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('token_hash', String(length=255), primary_key=True,
- nullable=False),
- Column('user_id', String(length=255)),
- Column('server_management_url', String(length=255)),
- Column('storage_url', String(length=255)),
- Column('cdn_management_url', String(length=255)),
- mysql_engine='InnoDB',
- )
-
- projects = Table('projects', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('description', String(length=255)),
- Column('project_manager', String(length=255), ForeignKey('users.id')),
- mysql_engine='InnoDB',
- )
-
- user_project_association = Table('user_project_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- mysql_engine='InnoDB',
- )
-
- user_project_role_association = \
- Table('user_project_role_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
- mysql_engine='InnoDB',
- )
-
- user_role_association = Table('user_role_association', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('user_id', String(length=255), ForeignKey('users.id'),
- primary_key=True, nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
- mysql_engine='InnoDB',
- )
-
- users = Table('users', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('access_key', String(length=255)),
- Column('secret_key', String(length=255)),
- Column('is_admin', Boolean),
- mysql_engine='InnoDB',
- )
-
- tables = [users, projects, user_project_association,
- auth_tokens, user_project_role_association,
- user_role_association]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.exception('Exception while creating table.')
- raise
-
- if migrate_engine.name == 'mysql':
- index = Index('project_id', user_project_association.c.project_id)
- index.create(migrate_engine)
-
- fkeys = [
- [
- [user_project_role_association.c.user_id,
- user_project_role_association.c.project_id],
- [user_project_association.c.user_id,
- user_project_association.c.project_id],
- 'user_project_role_association_ibfk_1',
- ],
- [
- [user_project_association.c.user_id],
- [users.c.id],
- 'user_project_association_ibfk_1',
- ],
- [
- [user_project_association.c.project_id],
- [projects.c.id],
- 'user_project_association_ibfk_2',
- ],
- ]
-
- for fkey_pair in fkeys:
- if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
- fkey = ForeignKeyConstraint(columns=fkey_pair[0],
- refcolumns=fkey_pair[1],
- name=fkey_pair[2])
- fkey.create()
- elif migrate_engine.name == 'postgresql':
- fkey = ForeignKeyConstraint(columns=fkey_pair[0],
- refcolumns=fkey_pair[1])
- fkey.create()
-
- # Hopefully this entire loop to set the charset can go away during
- # the "E" release compaction. See the notes on the dns_domains
- # table above for why this is required vs. setting mysql_charset inline.
- if migrate_engine.name == "mysql":
- tables = [
- # tables that are FK parents, must be converted early
- "projects",
- "user_project_association",
- "users",
- # those that are children and others later
- "auth_tokens",
- "user_project_role_association",
- "user_role_association",
- ]
- sql = "SET foreign_key_checks = 0;"
- for table in tables:
- sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
- sql += "SET foreign_key_checks = 1;"
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
- % migrate_engine.url.database
- migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py b/nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py
deleted file mode 100644
index df4a83843..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/111_general_aggregates.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import String, Column, MetaData, Table, delete, select
-from migrate.changeset import UniqueConstraint
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dialect = migrate_engine.url.get_dialect().name
-
- aggregates = Table('aggregates', meta, autoload=True)
- aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
- record_list = list(aggregates.select().execute())
- for rec in record_list:
- row = aggregate_metadata.insert()
- row.execute({'created_at': rec['created_at'],
- 'updated_at': rec['updated_at'],
- 'deleted_at': rec['deleted_at'],
- 'deleted': rec['deleted'],
- 'key': 'operational_state',
- 'value': rec['operational_state'],
- 'aggregate_id': rec['id'],
- })
- aggregates.drop_column('operational_state')
-
- aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
- if dialect.startswith('sqlite'):
- aggregate_hosts.c.host.alter(unique=False)
- elif dialect.startswith('postgres'):
- ucon = UniqueConstraint('host',
- name='aggregate_hosts_host_key',
- table=aggregate_hosts)
- ucon.drop()
- else:
- col = aggregate_hosts.c.host
- UniqueConstraint(col, name='host').drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- aggregates = Table('aggregates', meta, autoload=True)
- aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
- operational_state = Column('operational_state', String(255))
- aggregates.create_column(operational_state)
- aggregates.update().values(operational_state=select(
- [aggregate_metadata.c.value]).where(aggregates.c.id ==
- aggregate_metadata.c.aggregate_id and aggregate_metadata.c.key ==
- 'operational_state')).execute()
- delete(aggregate_metadata, aggregate_metadata.c.key == 'operational_state')
- aggregates.c.operational_state.alter(nullable=False)
- aggregate_hosts = Table('aggregate_hosts', meta, autoload=True)
- aggregate_hosts.c.host.alter(unique=True)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py b/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py
deleted file mode 100644
index 27ad13e91..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-from sqlalchemy import MetaData, Table
-from sqlalchemy import and_, between
-
-
-TABLES = ('instance_metadata',
- 'instance_system_metadata',
- 'block_device_mapping')
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- instance_list = list(instances.select().
- where(instances.c.deleted == True).execute())
- for table_name in TABLES:
- table = Table(table_name, meta, autoload=True)
-
- for instance in instance_list:
- if not instance['deleted_at']:
- continue
- table.update(
- (and_(table.c.deleted == True,
- table.c.instance_uuid == instance['uuid'],
- between(table.c.deleted_at,
- instance['deleted_at'] - datetime.timedelta(seconds=2),
- instance['deleted_at'] + datetime.timedelta(seconds=2)))
- ),
- {table.c.deleted: False,
- table.c.deleted_at: None}
- ).execute()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('instances', meta, autoload=True)
-
- instance_list = list(instances.select().
- where(instances.c.deleted == True).execute())
- for table_name in TABLES:
- table = Table(table_name, meta, autoload=True)
- for instance in instance_list:
- table.update(
- (and_(table.c.deleted == False,
- table.c.instance_uuid == instance['uuid'])
- ),
- {table.c.deleted: True,
- table.c.deleted_at: instance['deleted_at']}
- ).execute()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py
deleted file mode 100644
index d51bbb912..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(fixed_ips)
-
- try:
- fixed_ips.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == fixed_ips.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(fixed_ips.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- fixed_ips.c.instance_id.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- fixed_ips = Table('fixed_ips', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(fixed_ips)
-
- fkeys = list(fixed_ips.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- try:
- fixed_ips.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == fixed_ips.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- fixed_ips.c.instance_uuid.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[fixed_ips.c.instance_id],
- refcolumns=[instances.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql
deleted file mode 100644
index 0a7a7bed9..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_downgrade.sql
+++ /dev/null
@@ -1,85 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- NULL,
- instance_uuid,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips;
-
- UPDATE fixed_ips_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE fixed_ips_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX fixed_ips_id ON fixed_ips(id);
- CREATE INDEX address ON fixed_ips(address);
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql
deleted file mode 100644
index 417b5bfe3..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/113_sqlite_upgrade.sql
+++ /dev/null
@@ -1,85 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE fixed_ips_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER NOT NULL,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id)
- );
-
- INSERT INTO fixed_ips_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- NULL,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips;
-
- UPDATE fixed_ips_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE fixed_ips_backup.instance_id = instances.id
- );
-
- DROP TABLE fixed_ips;
-
- CREATE TABLE fixed_ips (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_uuid VARCHAR(36),
- allocated BOOLEAN,
- leased BOOLEAN,
- reserved BOOLEAN,
- virtual_interface_id INTEGER,
- host VARCHAR(255),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX fixed_ips_id ON fixed_ips(id);
- CREATE INDEX address ON fixed_ips(address);
-
- INSERT INTO fixed_ips
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_uuid,
- allocated,
- leased,
- reserved,
- virtual_interface_id,
- host
- FROM fixed_ips_backup;
-
- DROP TABLE fixed_ips_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql
deleted file mode 100644
index bb210025a..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_downgrade.sql
+++ /dev/null
@@ -1,71 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- NULL,
- instance_uuid,
- uuid
- FROM virtual_interfaces;
-
- UPDATE virtual_interfaces_backup
- SET instance_id=
- (SELECT id
- FROM instances
- WHERE virtual_interfaces_backup.instance_uuid = instances.uuid
- );
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id VARCHAR(36) NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_id) REFERENCES instances (id)
- );
-
- CREATE INDEX virtual_interfaces_instance_id ON
- virtual_interfaces(instance_id);
- CREATE INDEX virtual_interfaces_network_id ON
- virtual_interfaces(network_id);
-
- INSERT INTO virtual_interfaces
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT; \ No newline at end of file
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql
deleted file mode 100644
index 5ee98d5c1..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_sqlite_upgrade.sql
+++ /dev/null
@@ -1,71 +0,0 @@
-BEGIN TRANSACTION;
- CREATE TEMPORARY TABLE virtual_interfaces_backup (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_id INTEGER,
- instance_uuid VARCHAR(36),
- uuid VARCHAR(36),
- PRIMARY KEY (id)
- );
-
- INSERT INTO virtual_interfaces_backup
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_id,
- NULL,
- uuid
- FROM virtual_interfaces;
-
- UPDATE virtual_interfaces_backup
- SET instance_uuid=
- (SELECT uuid
- FROM instances
- WHERE virtual_interfaces_backup.instance_id = instances.id
- );
-
- DROP TABLE virtual_interfaces;
-
- CREATE TABLE virtual_interfaces (
- created_at DATETIME,
- updated_at DATETIME,
- deleted_at DATETIME,
- deleted BOOLEAN,
- id INTEGER NOT NULL,
- address VARCHAR(255),
- network_id INTEGER,
- instance_uuid VARCHAR(36) NOT NULL,
- uuid VARCHAR(36),
- PRIMARY KEY (id),
- FOREIGN KEY(instance_uuid) REFERENCES instances (uuid)
- );
-
- CREATE INDEX virtual_interfaces_instance_uuid ON
- virtual_interfaces(instance_uuid);
- CREATE INDEX virtual_interfaces_network_id ON
- virtual_interfaces(network_id);
-
- INSERT INTO virtual_interfaces
- SELECT created_at,
- updated_at,
- deleted_at,
- deleted,
- id,
- address,
- network_id,
- instance_uuid,
- uuid
- FROM virtual_interfaces_backup;
-
- DROP TABLE virtual_interfaces_backup;
-
-COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py
deleted file mode 100644
index 8f7ad1a15..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, String, Table
-from sqlalchemy import select, Column, ForeignKey, Integer
-
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- uuid_column = Column('instance_uuid', String(36))
- uuid_column.create(virtual_interfaces)
-
- try:
- virtual_interfaces.update().values(
- instance_uuid=select(
- [instances.c.uuid],
- instances.c.id == virtual_interfaces.c.instance_id)
- ).execute()
- except Exception:
- uuid_column.drop()
- raise
-
- fkeys = list(virtual_interfaces.c.instance_id.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_id],
- refcolumns=[instances.c.id],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- virtual_interfaces.c.instance_id.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_uuid],
- refcolumns=[instances.c.uuid]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
- instances = Table('instances', meta, autoload=True)
- id_column = Column('instance_id', Integer, ForeignKey('instances.id'))
- id_column.create(virtual_interfaces)
-
- fkeys = list(virtual_interfaces.c.instance_uuid.foreign_keys)
- if fkeys:
- try:
- fkey_name = fkeys[0].constraint.name
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_uuid],
- refcolumns=[instances.c.uuid],
- name=fkey_name).drop()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be removed"))
- raise
-
- try:
- virtual_interfaces.update().values(
- instance_id=select(
- [instances.c.id],
- instances.c.uuid == virtual_interfaces.c.instance_uuid)
- ).execute()
- except Exception:
- id_column.drop()
- raise
-
- virtual_interfaces.c.instance_uuid.drop()
-
- try:
- ForeignKeyConstraint(
- columns=[virtual_interfaces.c.instance_id],
- refcolumns=[instances.c.id]).create()
- except Exception:
- LOG.error(_("foreign key constraint couldn't be created"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py
deleted file mode 100644
index 447307952..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Upgrade operations go here. Don't create your own engine;
- # bind migrate_engine to your metadata
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Add 'user_id' column to quota_usages table.
- quota_usages = Table('quota_usages', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- quota_usages.create_column(user_id)
-
- # Add 'user_id' column to reservations table.
- reservations = Table('reservations', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- reservations.create_column(user_id)
-
- # New table.
- user_quotas = Table('user_quotas', meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('resource',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=False),
- Column('hard_limit', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- user_quotas.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(user_quotas))
- raise
-
-
-def downgrade(migrate_engine):
- # Operations to reverse the above upgrade go here.
- meta = MetaData()
- meta.bind = migrate_engine
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- quota_usages.drop_column('user_id')
-
- reservations = Table('reservations', meta, autoload=True)
- reservations.drop_column('user_id')
-
- user_quotas = Table('user_quotas', meta, autoload=True)
- try:
- user_quotas.drop()
- except Exception:
- LOG.error(_("user_quotas table not dropped"))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
deleted file mode 100644
index ccf9d66b8..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.openstack.common import log as logging
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import MetaData, String, Table
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- # Reverse the previous migration
- meta = MetaData()
- meta.bind = migrate_engine
-
- reservations = Table('reservations', meta, autoload=True)
- d = reservations.delete(reservations.c.deleted == True)
- d.execute()
- reservations.drop_column('user_id')
-
- quota_usages = Table('quota_usages', meta, autoload=True)
- d = quota_usages.delete(quota_usages.c.user_id != None)
- d.execute()
- quota_usages.drop_column('user_id')
-
- user_quotas = Table('user_quotas', meta, autoload=True)
- try:
- user_quotas.drop()
- except Exception:
- LOG.error(_("user_quotas table not dropped"))
- raise
-
-
-def downgrade(migrate_engine):
- # Undo the reversal of the previous migration
- # (data is not preserved)
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Add 'user_id' column to quota_usages table.
- quota_usages = Table('quota_usages', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- quota_usages.create_column(user_id)
-
- # Add 'user_id' column to reservations table.
- reservations = Table('reservations', meta, autoload=True)
- user_id = Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False))
- reservations.create_column(user_id)
-
- # New table.
- user_quotas = Table('user_quotas', meta,
- Column('id', Integer(), primary_key=True),
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('user_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('project_id',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- Column('resource',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=False),
- Column('hard_limit', Integer(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- try:
- user_quotas.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(user_quotas))
- raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py b/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py
deleted file mode 100644
index 5b0e19660..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/117_add_compute_node_stats.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (c) 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, Integer
-from sqlalchemy import Index, MetaData, String, Table
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- # create new table
- compute_node_stats = Table('compute_node_stats', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(create_constraint=True, name=None)),
- Column('id', Integer(), primary_key=True, nullable=False,
- autoincrement=True),
- Column('compute_node_id', Integer, index=True, nullable=False),
- Column('key', String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False), nullable=False),
- Column('value', String(length=255, convert_unicode=True,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False)),
- mysql_engine='InnoDB')
- try:
- compute_node_stats.create()
- except Exception:
- LOG.exception("Exception while creating table 'compute_node_stats'")
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # load tables for fk
- compute_nodes = Table('compute_nodes', meta, autoload=True)
-
- compute_node_stats = Table('compute_node_stats', meta, autoload=True)
- compute_node_stats.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py b/nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py
deleted file mode 100644
index 23f7d3cdb..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/118_add_indexes_to_agent_builds.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on agent_build_get_by_triple
- # from: nova/db/sqlalchemy/api.py
- t = Table('agent_builds', meta, autoload=True)
- i = Index('agent_builds_hypervisor_os_arch_idx',
- t.c.hypervisor, t.c.os, t.c.architecture)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('agent_builds', meta, autoload=True)
- i = Index('agent_builds_hypervisor_os_arch_idx',
- t.c.hypervisor, t.c.os, t.c.architecture)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py
deleted file mode 100644
index 432fd91a0..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/120_add_indexes_to_block_device_mapping.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('block_device_mapping', meta, autoload=True)
-
- # Based on block_device_mapping_update_or_create
- # from: nova/db/sqlalchemy/api.py
- i = Index('block_device_mapping_instance_uuid_device_name_idx',
- t.c.instance_uuid, t.c.device_name)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on block_device_mapping_update_or_create
- # from: nova/db/sqlalchemy/api.py
- i = Index(
- 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
- t.c.instance_uuid, t.c.virtual_name, t.c.device_name)
- i.create(migrate_engine)
-
- # Based on block_device_mapping_destroy_by_instance_and_volume
- # from: nova/db/sqlalchemy/api.py
- i = Index('block_device_mapping_instance_uuid_volume_id_idx',
- t.c.instance_uuid, t.c.volume_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('block_device_mapping', meta, autoload=True)
-
- i = Index('block_device_mapping_instance_uuid_device_name_idx',
- t.c.instance_uuid, t.c.device_name)
- i.drop(migrate_engine)
-
- i = Index(
- 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
- t.c.instance_uuid, t.c.virtual_name, t.c.device_name)
- i.drop(migrate_engine)
-
- i = Index('block_device_mapping_instance_uuid_volume_id_idx',
- t.c.instance_uuid, t.c.volume_id)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py b/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py
deleted file mode 100644
index 1345e5396..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/121_add_indexes_to_bw_usage_cache.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError, OperationalError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on bw_usage_get_by_uuids
- # from: nova/db/sqlalchemy/api.py
- t = Table('bw_usage_cache', meta, autoload=True)
- i = Index('bw_usage_cache_uuid_start_period_idx',
- t.c.uuid, t.c.start_period)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('bw_usage_cache', meta, autoload=True)
- i = Index('bw_usage_cache_uuid_start_period_idx',
- t.c.uuid, t.c.start_period)
- if migrate_engine.url.get_dialect().name.startswith('sqlite'):
- try:
- i.drop(migrate_engine)
- except OperationalError:
- # Sqlite is very broken for any kind of table modification.
- # adding columns creates a new table, then copies the data,
- # and looses the indexes.
- # Thus later migrations that add columns will cause the
- # earlier migration's downgrade unittests to fail on
- # dropping indexes.
- # Honestly testing migrations on sqlite is not really a very
- # valid test (because of above facts), but that is for
- # another day. (mdragon)
- pass
- else:
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py b/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py
deleted file mode 100644
index 1201ce6be..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/122_add_indexes_to_certificates.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('certificates', meta, autoload=True)
-
- # Based on certificate_get_all_by_project
- # from: nova/db/sqlalchemy/api.py
- i = Index('certificates_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on certificate_get_all_by_user
- # from: nova/db/sqlalchemy/api.py
- i = Index('certificates_user_id_deleted_idx',
- t.c.user_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('certificates', meta, autoload=True)
-
- i = Index('certificates_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('certificates_user_id_deleted_idx',
- t.c.user_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py
deleted file mode 100644
index 0ae4a4d51..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/124_add_indexes_to_fixed_ips.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('fixed_ips', meta, autoload=True)
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on fixed_ip_get_by_network_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_network_id_host_deleted_idx',
- t.c.network_id, t.c.host, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on fixed_ip_associate
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_address_reserved_network_id_deleted_idx',
- t.c.address, t.c.reserved, t.c.network_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('fixed_ips', meta, autoload=True)
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- # Based on fixed_ip_get_by_network_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_network_id_host_deleted_idx',
- t.c.network_id, t.c.host, t.c.deleted)
- i.drop(migrate_engine)
-
- # Based on fixed_ip_associate
- # from: nova/db/sqlalchemy/api.py
- i = Index('fixed_ips_address_reserved_network_id_deleted_idx',
- t.c.address, t.c.reserved, t.c.network_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py
deleted file mode 100644
index b953b28b9..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/125_add_indexes_to_floating_ips.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('floating_ips', meta, autoload=True)
-
- # Based on floating_ip_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on floating_ip_get_all_by_project
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_project_id_idx', t.c.project_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on floating_ip_allocate_address
- # from: nova/db/sqlalchemy/api.py
- i = Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- t.c.pool, t.c.deleted, t.c.fixed_ip_id, t.c.project_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('floating_ips', meta, autoload=True)
-
- i = Index('floating_ips_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('floating_ips_project_id_idx', t.c.project_id)
- i.drop(migrate_engine)
-
- i = Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- t.c.pool, t.c.deleted, t.c.fixed_ip_id, t.c.project_id)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py b/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py
deleted file mode 100644
index 3ed8277a6..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/126_add_indexes_to_instance_faults.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on instance_fault_get_by_instance_uuids
- # from: nova/db/sqlalchemy/api.py
- t = Table('instance_faults', meta, autoload=True)
- i = Index('instance_faults_instance_uuid_deleted_created_at_idx',
- t.c.instance_uuid, t.c.deleted, t.c.created_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instance_faults', meta, autoload=True)
- i = Index('instance_faults_instance_uuid_deleted_created_at_idx',
- t.c.instance_uuid, t.c.deleted, t.c.created_at)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py
deleted file mode 100644
index 80ef0f983..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/127_add_indexes_to_instance_type_extra_specs.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- # Based on instance_type_extra_specs_get_item
- # from: nova/db/sqlalchemy/api.py
- t = Table('instance_type_extra_specs', meta, autoload=True)
- i = Index('instance_type_extra_specs_instance_type_id_key_idx',
- t.c.instance_type_id, t.c.key)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instance_type_extra_specs', meta, autoload=True)
- i = Index('instance_type_extra_specs_instance_type_id_key_idx',
- t.c.instance_type_id, t.c.key)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py
deleted file mode 100644
index a429a7685..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/128_add_indexes_to_instances.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instances', meta, autoload=True)
-
- # Based on service_get_all_compute_sorted
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_host_deleted_idx',
- t.c.host, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_all_by_reservation
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_reservation_id_idx', t.c.reservation_id)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_active_by_window
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_terminated_at_launched_at_idx',
- t.c.terminated_at, t.c.launched_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on security_group_in_use
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_uuid_deleted_idx',
- t.c.uuid, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on instance_get_all_hung_in_rebooting
- # from: nova/db/sqlalchemy/api.py
- i = Index('instances_task_state_updated_at_idx',
- t.c.task_state, t.c.updated_at)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('instances', meta, autoload=True)
-
- i = Index('instances_host_deleted_idx',
- t.c.host, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('instances_reservation_id_idx', t.c.reservation_id)
- i.drop(migrate_engine)
-
- i = Index('instances_terminated_at_launched_at_idx',
- t.c.terminated_at, t.c.launched_at)
- i.drop(migrate_engine)
-
- i = Index('instances_uuid_deleted_idx',
- t.c.uuid, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('instances_task_state_updated_at_idx',
- t.c.task_state, t.c.updated_at)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py b/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py
deleted file mode 100644
index e904742ae..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/129_add_indexes_to_iscsi_targets.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('iscsi_targets', meta, autoload=True)
-
- # Based on iscsi_target_count_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('iscsi_targets_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on volume_allocate_iscsi_target
- # from: nova/db/sqlalchemy/api.py
- i = Index('iscsi_targets_host_volume_id_deleted_idx',
- t.c.host, t.c.volume_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('iscsi_targets', meta, autoload=True)
-
- i = Index('iscsi_targets_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('iscsi_targets_host_volume_id_deleted_idx',
- t.c.host, t.c.volume_id, t.c.deleted)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py
deleted file mode 100644
index 11a9dde86..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/131_add_indexes_to_networks.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('networks', meta, autoload=True)
-
- # Based on network_get_by_bridge
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_bridge_deleted_idx',
- t.c.bridge, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_all_by_host
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_host_idx', t.c.host)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_query
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_all_by_uuids
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_uuid_project_id_deleted_idx',
- t.c.uuid, t.c.project_id, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_create_safe
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_vlan_deleted_idx',
- t.c.vlan, t.c.deleted)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
- # Based on network_get_by_cidr
- # from: nova/db/sqlalchemy/api.py
- i = Index('networks_cidr_v6_idx', t.c.cidr_v6)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- t = Table('networks', meta, autoload=True)
-
- i = Index('networks_bridge_deleted_idx',
- t.c.bridge, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_host_idx', t.c.host)
- i.drop(migrate_engine)
-
- i = Index('networks_project_id_deleted_idx',
- t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_uuid_project_id_deleted_idx',
- t.c.uuid, t.c.project_id, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_vlan_deleted_idx',
- t.c.vlan, t.c.deleted)
- i.drop(migrate_engine)
-
- i = Index('networks_cidr_v6_idx', t.c.cidr_v6)
- i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py b/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py
deleted file mode 100644
index 312ebbfc1..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Boolean, Column, DateTime, String, ForeignKey, Integer
-from sqlalchemy import MetaData, String, Table
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- is_public = Column('is_public', Boolean)
-
- instance_types.create_column(is_public)
- instance_types.update().values(is_public=True).execute()
-
- # New table.
- instance_type_projects = Table('instance_type_projects', meta,
- Column('created_at', DateTime(timezone=False)),
- Column('updated_at', DateTime(timezone=False)),
- Column('deleted_at', DateTime(timezone=False)),
- Column('deleted', Boolean(), default=False),
- Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_type_id',
- Integer,
- ForeignKey('instance_types.id'),
- nullable=False),
- Column('project_id', String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- try:
- instance_type_projects.create()
- except Exception:
- LOG.error(_("Table |%s| not created!"), repr(instance_type_projects))
- raise
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- instance_types = Table('instance_types', meta, autoload=True)
- is_public = Column('is_public', Boolean)
-
- instance_types.drop_column(is_public)
-
- instance_type_projects = Table(
- 'instance_type_projects', meta, autoload=True)
- instance_type_projects.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py b/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py
deleted file mode 100644
index b6cf56d47..000000000
--- a/nova/db/sqlalchemy/migrate_repo/versions/133_aggregate_delete_fix.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import String, Column, MetaData, Table
-from migrate.changeset import UniqueConstraint
-
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- dialect = migrate_engine.url.get_dialect().name
-
- aggregates = Table('aggregates', meta, autoload=True)
- if dialect.startswith('sqlite'):
- aggregates.c.name.alter(unique=False)
- elif dialect.startswith('postgres'):
- ucon = UniqueConstraint('name',
- name='aggregates_name_key',
- table=aggregates)
- ucon.drop()
-
- else:
- col2 = aggregates.c.name
- UniqueConstraint(col2, name='name').drop()
-
-
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- aggregates = Table('aggregates', meta, autoload=True)
- aggregates.c.name.alter(unique=True)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
index 971fa3626..44eac3695 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/082_essex.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py
@@ -14,21 +14,28 @@
# License for the specific language governing permissions and limitations
# under the License.
+from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Float, ForeignKey
from sqlalchemy import Index, Integer, MetaData, String, Table, Text
+from sqlalchemy import dialects
-from nova import flags
from nova.openstack.common import log as logging
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
+# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
+# Schema. In Folsom we created tables as latin1 and converted them to utf8
+# later. This conversion causes some of the Text columns on MySQL to get
+# created as mediumtext instead of just text.
+def MediumText():
+ return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
+
+
def _populate_instance_types(instance_types_table):
default_inst_types = {
'm1.tiny': dict(mem=512, vcpus=1, root_gb=0, eph_gb=0, flavid=1),
@@ -47,10 +54,12 @@ def _populate_instance_types(instance_types_table):
'ephemeral_gb': values["eph_gb"],
'rxtx_factor': 1,
'swap': 0,
- 'flavorid': values["flavid"]})
+ 'flavorid': values["flavid"],
+ 'disabled': False,
+ 'is_public': True})
except Exception:
LOG.info(repr(instance_types_table))
- LOG.exception('Exception while seeding instance_types table')
+ LOG.exception(_('Exception while seeding instance_types table'))
raise
@@ -71,7 +80,7 @@ def upgrade(migrate_engine):
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
@@ -80,11 +89,11 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('host', String(length=255), unique=True),
+ Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
@@ -98,7 +107,7 @@ def upgrade(migrate_engine):
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
@@ -107,26 +116,10 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('name', String(length=255), unique=True),
- Column('operational_state', String(length=255), nullable=False),
+ Column('name', String(length=255)),
Column('availability_zone', String(length=255), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- auth_tokens = Table('auth_tokens', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('token_hash', String(length=255), primary_key=True,
- nullable=False),
- Column('user_id', String(length=255)),
- Column('server_management_url', String(length=255)),
- Column('storage_url', String(length=255)),
- Column('cdn_management_url', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
@@ -135,20 +128,17 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_id', Integer, ForeignKey('instances.id'),
- nullable=False),
Column('device_name', String(length=255), nullable=False),
Column('delete_on_termination', Boolean),
Column('virtual_name', String(length=255)),
- Column('snapshot_id', Integer, ForeignKey('snapshots.id'),
- nullable=True),
- Column('volume_id', Integer(), ForeignKey('volumes.id'),
- nullable=True),
+ Column('snapshot_id', String(length=36), nullable=True),
+ Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
- Column('connection_info', Text),
+ Column('connection_info', MediumText()),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
@@ -162,8 +152,9 @@ def upgrade(migrate_engine):
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
+ Column('uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
cells = Table('cells', meta,
@@ -196,7 +187,20 @@ def upgrade(migrate_engine):
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ compute_node_stats = Table('compute_node_stats', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('compute_node_id', Integer, nullable=False),
+ Column('key', String(length=255), nullable=False),
+ Column('value', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
@@ -212,9 +216,9 @@ def upgrade(migrate_engine):
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
- Column('hypervisor_type', Text, nullable=False),
+ Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
- Column('cpu_info', Text, nullable=False),
+ Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
@@ -222,7 +226,7 @@ def upgrade(migrate_engine):
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
@@ -239,7 +243,7 @@ def upgrade(migrate_engine):
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
consoles = Table('consoles', meta,
@@ -249,31 +253,25 @@ def upgrade(migrate_engine):
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
- Column('instance_id', Integer),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- # NOTE(dprince): Trying to create a fresh utf8 dns_domains tables
- # with a domain primary key length of 512 fails w/
- # 'Specified key was too long; max key length is 767 bytes'.
- # See: https://bugs.launchpad.net/nova/+bug/993663
- # If we fix this during Folsom we can set mysql_charset=utf8 inline...
- # and remove the unsightly loop that does it below during "E" compaction.
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('domain', String(length=512), primary_key=True, nullable=False),
+ Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
- Column('project_id', String(length=255), ForeignKey('projects.id')),
+ Column('project_id', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
@@ -284,14 +282,14 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
- Column('instance_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
@@ -308,34 +306,32 @@ def upgrade(migrate_engine):
Column('pool', String(length=255)),
Column('interface', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- instance_actions = Table('instance_actions', meta,
+ instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('action', String(length=255)),
- Column('error', Text),
Column('instance_uuid', String(length=36)),
+ Column('code', Integer, nullable=False),
+ Column('message', String(length=255)),
+ Column('details', MediumText()),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- instance_faults = Table('instance_faults', meta,
+ instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_uuid', String(length=36)),
- Column('code', Integer, nullable=False),
- Column('message', String(length=255)),
- Column('details', Text),
+ Column('uuid', String(36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
@@ -344,10 +340,10 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('network_info', Text),
- Column('instance_id', String(36), nullable=False, unique=True),
+ Column('network_info', MediumText()),
+ Column('instance_uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_metadata = Table('instance_metadata', meta,
@@ -356,12 +352,24 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('instance_id', Integer, ForeignKey('instances.id'),
- nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
+ Column('instance_uuid', String(length=36), nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ instance_system_metadata = Table('instance_system_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('instance_uuid', String(length=36), nullable=False),
+ Column('key', String(length=255), nullable=False),
+ Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
@@ -375,7 +383,19 @@ def upgrade(migrate_engine):
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ instance_type_projects = Table('instance_type_projects', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('instance_type_id', Integer, nullable=False),
+ Column('project_id', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
@@ -393,8 +413,10 @@ def upgrade(migrate_engine):
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
+ Column('disabled', Boolean),
+ Column('is_public', Boolean),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instances = Table('instances', meta,
@@ -412,14 +434,14 @@ def upgrade(migrate_engine):
Column('server_name', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
- Column('key_data', Text),
+ Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
- Column('user_data', Text),
+ Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
@@ -429,7 +451,7 @@ def upgrade(migrate_engine):
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
- Column('launched_on', Text),
+ Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
@@ -449,7 +471,7 @@ def upgrade(migrate_engine):
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
iscsi_targets = Table('iscsi_targets', meta,
@@ -460,10 +482,9 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
- Column('volume_id', Integer, ForeignKey('volumes.id'),
- nullable=True),
+ Column('volume_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
@@ -475,9 +496,9 @@ def upgrade(migrate_engine):
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
- Column('public_key', Text),
+ Column('public_key', MediumText()),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
migrations = Table('migrations', meta,
@@ -494,7 +515,7 @@ def upgrade(migrate_engine):
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
networks = Table('networks', meta,
@@ -528,34 +549,49 @@ def upgrade(migrate_engine):
Column('priority', Integer),
Column('rxtx_base', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- projects = Table('projects', meta,
+ provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('description', String(length=255)),
- Column('project_manager', String(length=255), ForeignKey('users.id')),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('protocol', String(length=5)),
+ Column('from_port', Integer),
+ Column('to_port', Integer),
+ Column('cidr', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- provider_fw_rules = Table('provider_fw_rules', meta,
+ quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('protocol', String(length=5)),
- Column('from_port', Integer),
- Column('to_port', Integer),
- Column('cidr', String(length=255)),
+ Column('class_name', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('hard_limit', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ quota_usages = Table('quota_usages', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('project_id', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('in_use', Integer, nullable=False),
+ Column('reserved', Integer, nullable=False),
+ Column('until_refresh', Integer),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
quotas = Table('quotas', meta,
@@ -568,7 +604,23 @@ def upgrade(migrate_engine):
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
+ )
+
+ reservations = Table('reservations', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('uuid', String(length=36), nullable=False),
+ Column('usage_id', Integer, nullable=False),
+ Column('project_id', String(length=255)),
+ Column('resource', String(length=255)),
+ Column('delta', Integer, nullable=False),
+ Column('expire', DateTime),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
@@ -579,7 +631,7 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_group_instance_association = \
@@ -590,9 +642,9 @@ def upgrade(migrate_engine):
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer, ForeignKey('security_groups.id')),
- Column('instance_id', Integer, ForeignKey('instances.id')),
+ Column('instance_uuid', String(length=36)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
@@ -608,7 +660,7 @@ def upgrade(migrate_engine):
Column('cidr', String(length=255)),
Column('group_id', Integer, ForeignKey('security_groups.id')),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
@@ -622,7 +674,7 @@ def upgrade(migrate_engine):
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
services = Table('services', meta,
@@ -638,19 +690,7 @@ def upgrade(migrate_engine):
Column('disabled', Boolean),
Column('availability_zone', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- sm_flavors = Table('sm_flavors', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', Integer, primary_key=True, nullable=False),
- Column('label', String(length=255)),
- Column('description', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
sm_backend_config = Table('sm_backend_config', meta,
@@ -665,94 +705,80 @@ def upgrade(migrate_engine):
Column('sr_type', String(length=255)),
Column('config_params', String(length=2047)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
- )
-
- sm_volume = Table('sm_volume', meta,
- Column('created_at', DateTime),
- Column('updated_at', DateTime),
- Column('deleted_at', DateTime),
- Column('deleted', Boolean),
- Column('id', Integer(), ForeignKey('volumes.id'), primary_key=True,
- nullable=False, autoincrement=False),
- Column('backend_id', Integer, ForeignKey('sm_backend_config.id'),
- nullable=False),
- Column('vdi_uuid', String(length=255)),
- mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- snapshots = Table('snapshots', meta,
+ sm_flavors = Table('sm_flavors', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('volume_id', Integer, nullable=False),
- Column('user_id', String(length=255)),
- Column('project_id', String(length=255)),
- Column('status', String(length=255)),
- Column('progress', String(length=255)),
- Column('volume_size', Integer),
- Column('scheduled_at', DateTime),
- Column('display_name', String(length=255)),
- Column('display_description', String(length=255)),
+ Column('label', String(length=255)),
+ Column('description', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_project_association = Table('user_project_association', meta,
+ sm_volume = Table('sm_volume', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
+ Column('id', String(length=36), primary_key=True,
+ nullable=False, autoincrement=False),
+ Column('backend_id', Integer, nullable=False),
+ Column('vdi_uuid', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_project_role_association = \
- Table('user_project_role_association', meta,
+ snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), primary_key=True,
- nullable=False),
- Column('project_id', String(length=255), primary_key=True,
- nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- user_role_association = Table('user_role_association', meta,
+ snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('user_id', String(length=255), ForeignKey('users.id'),
- primary_key=True, nullable=False),
- Column('role', String(length=255), primary_key=True, nullable=False),
+ Column('id', String(length=36), primary_key=True, nullable=False),
+ Column('volume_id', String(length=36), nullable=False),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('status', String(length=255)),
+ Column('progress', String(length=255)),
+ Column('volume_size', Integer),
+ Column('scheduled_at', DateTime),
+ Column('display_name', String(length=255)),
+ Column('display_description', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- users = Table('users', meta,
+ task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
- Column('id', String(length=255), primary_key=True, nullable=False),
- Column('name', String(length=255)),
- Column('access_key', String(length=255)),
- Column('secret_key', String(length=255)),
- Column('is_admin', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('task_name', String(length=255), nullable=False),
+ Column('state', String(length=255), nullable=False),
+ Column('host', String(length=255), nullable=False),
+ Column('period_beginning', String(length=255), nullable=False),
+ Column('period_ending', String(length=255), nullable=False),
+ Column('message', String(length=255), nullable=False),
+ Column('task_items', Integer),
+ Column('errors', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
@@ -763,10 +789,10 @@ def upgrade(migrate_engine):
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255), unique=True),
Column('network_id', Integer),
- Column('instance_id', Integer, nullable=False),
Column('uuid', String(length=36)),
+ Column('instance_uuid', String(length=36), nullable=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
virtual_storage_arrays = Table('virtual_storage_arrays', meta,
@@ -785,18 +811,18 @@ def upgrade(migrate_engine):
Column('vol_count', Integer, nullable=False),
Column('status', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- volume_types = Table('volume_types', meta,
+ volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('name', String(length=255)),
+ Column('uuid', String(length=36), nullable=False),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
volume_metadata = Table('volume_metadata', meta,
@@ -805,12 +831,11 @@ def upgrade(migrate_engine):
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
- Column('volume_id', Integer, ForeignKey('volumes.id'),
- nullable=False),
+ Column('volume_id', String(length=36), nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
volume_type_extra_specs = Table('volume_type_extra_specs', meta,
@@ -824,24 +849,33 @@ def upgrade(migrate_engine):
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
- volumes = Table('volumes', meta,
+ volume_types = Table('volume_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
+ Column('name', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ volumes = Table('volumes', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
- Column('instance_id', Integer, ForeignKey('instances.id')),
Column('mountpoint', String(length=255)),
- Column('attach_time', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
@@ -851,10 +885,12 @@ def upgrade(migrate_engine):
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
- Column('snapshot_id', Integer),
+ Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
+ Column('instance_uuid', String(length=36)),
+ Column('attach_time', DateTime),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
+ mysql_charset='utf8'
)
instances.create()
@@ -863,21 +899,24 @@ def upgrade(migrate_engine):
# create all tables
tables = [aggregates, console_pools, instance_types,
- users, projects, security_groups, sm_flavors, sm_backend_config,
- snapshots, user_project_association, volume_types,
+ security_groups, sm_flavors, sm_backend_config,
+ snapshots, volume_types,
volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
- auth_tokens, block_device_mapping, bw_usage_cache, cells,
- certificates, compute_nodes, consoles, dns_domains, fixed_ips,
- floating_ips, instance_actions, instance_faults,
- instance_info_caches, instance_metadata,
- instance_type_extra_specs, iscsi_targets, key_pairs,
- migrations, networks, provider_fw_rules,
- quotas, s3_images, security_group_instance_association,
+ block_device_mapping, bw_usage_cache, cells,
+ certificates, compute_node_stats, compute_nodes, consoles,
+ dns_domains, fixed_ips, floating_ips,
+ instance_faults, instance_id_mappings, instance_info_caches,
+ instance_metadata, instance_system_metadata,
+ instance_type_extra_specs, instance_type_projects,
+ iscsi_targets, key_pairs, migrations, networks,
+ provider_fw_rules, quota_classes, quota_usages, quotas,
+ reservations, s3_images, security_group_instance_association,
security_group_rules, services, sm_volume,
- user_project_role_association, user_role_association,
- virtual_interfaces, virtual_storage_arrays, volume_metadata,
+ snapshot_id_mappings, task_log,
+ virtual_interfaces,
+ virtual_storage_arrays, volume_id_mappings, volume_metadata,
volume_type_extra_specs]
for table in tables:
@@ -885,103 +924,302 @@ def upgrade(migrate_engine):
table.create()
except Exception:
LOG.info(repr(table))
- LOG.exception('Exception while creating table.')
+ LOG.exception(_('Exception while creating table.'))
raise
- # MySQL specific Indexes from Essex
- # NOTE(dprince): I think some of these can be removed in Folsom
indexes = [
+ # agent_builds
+ Index('agent_builds_hypervisor_os_arch_idx',
+ agent_builds.c.hypervisor,
+ agent_builds.c.os,
+ agent_builds.c.architecture),
+
+ # aggregate_metadata
+ Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
+
+ # block_device_mapping
+ Index('block_device_mapping_instance_uuid_idx',
+ block_device_mapping.c.instance_uuid),
+
+ Index('block_device_mapping_instance_uuid_device_name_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.device_name),
+ Index(
+ 'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.virtual_name,
+ block_device_mapping.c.device_name),
+
+ Index('block_device_mapping_instance_uuid_volume_id_idx',
+ block_device_mapping.c.instance_uuid,
+ block_device_mapping.c.volume_id),
+
+ # bw_usage_cache
+ Index('bw_usage_cache_uuid_start_period_idx',
+ bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
+
+ # certificates
+ Index('certificates_project_id_deleted_idx',
+ certificates.c.project_id, certificates.c.deleted),
+
+ Index('certificates_user_id_deleted_idx',
+ certificates.c.user_id, certificates.c.deleted),
+
+ # compute_node_stats
+ Index('ix_compute_node_stats_compute_node_id',
+ compute_node_stats.c.compute_node_id),
+
+ # consoles
+ Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
+
+ # dns_domains
+ Index('dns_domains_domain_deleted_idx',
+ dns_domains.c.domain, dns_domains.c.deleted),
+
+ # fixed_ips
+ Index('fixed_ips_host_idx', fixed_ips.c.host),
+ Index('fixed_ips_network_id_host_deleted_idx',
+ fixed_ips.c.network_id, fixed_ips.c.host, fixed_ips.c.deleted),
+ Index('fixed_ips_address_reserved_network_id_deleted_idx',
+ fixed_ips.c.address, fixed_ips.c.reserved,
+ fixed_ips.c.network_id, fixed_ips.c.deleted),
+
+ # floating_ips
+ Index('floating_ips_host_idx', floating_ips.c.host),
+
+ Index('floating_ips_project_id_idx', floating_ips.c.project_id),
+
+ Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
+ floating_ips.c.pool, floating_ips.c.deleted,
+ floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
+
+ # instance_faults
+ Index('instance_faults_instance_uuid_deleted_created_at_idx',
+ instance_faults.c.instance_uuid, instance_faults.c.deleted,
+ instance_faults.c.created_at),
+
+ # instance_type_extra_specs
+ Index('instance_type_extra_specs_instance_type_id_key_idx',
+ instance_type_extra_specs.c.instance_type_id,
+ instance_type_extra_specs.c.key),
+
+ # instance_id_mappings
+ Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
+
+ # instance_metadata
+ Index('instance_metadata_instance_uuid_idx',
+ instance_metadata.c.instance_uuid),
+
+ # instances
+ Index('instances_host_deleted_idx',
+ instances.c.host, instances.c.deleted),
+
+ Index('instances_reservation_id_idx', instances.c.reservation_id),
+
+ Index('instances_terminated_at_launched_at_idx',
+ instances.c.terminated_at, instances.c.launched_at),
+
+ Index('instances_uuid_deleted_idx',
+ instances.c.uuid, instances.c.deleted),
+
+ Index('instances_task_state_updated_at_idx',
+ instances.c.task_state, instances.c.updated_at),
+
+
+ # iscsi_targets
+ Index('iscsi_targets_host_idx', iscsi_targets.c.host),
+
+ Index('iscsi_targets_host_volume_id_deleted_idx',
+ iscsi_targets.c.host, iscsi_targets.c.volume_id,
+ iscsi_targets.c.deleted),
+
+ # key_pairs
+ Index('key_pair_user_id_name_idx',
+ key_pairs.c.user_id, key_pairs.c.name),
+
+ # networks
+ Index('networks_bridge_deleted_idx',
+ networks.c.bridge, networks.c.deleted),
+
+ Index('networks_host_idx', networks.c.host),
+
+ Index('networks_project_id_deleted_idx',
+ networks.c.project_id, networks.c.deleted),
+
+ Index('networks_uuid_project_id_deleted_idx',
+ networks.c.uuid, networks.c.project_id, networks.c.deleted),
+
+ Index('networks_vlan_deleted_idx',
+ networks.c.vlan, networks.c.deleted),
+
+ Index('networks_cidr_v6_idx', networks.c.cidr_v6),
+
+ # reservations
+ Index('ix_reservations_project_id', reservations.c.project_id),
+
+ # security_group_instance_association
+ Index('security_group_instance_association_instance_uuid_idx',
+ security_group_instance_association.c.instance_uuid),
+
+ # quota_classes
+ Index('ix_quota_classes_class_name', quota_classes.c.class_name),
+
+ # quota_usages
+ Index('ix_quota_usages_project_id', quota_usages.c.project_id),
+
+
+ # volumes
+ Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
+
+ # task_log
+ Index('ix_task_log_period_beginning', task_log.c.period_beginning),
+ Index('ix_task_log_host', task_log.c.host),
+ Index('ix_task_log_period_ending', task_log.c.period_ending),
+
+ ]
+
+ mysql_indexes = [
+ # TODO(dprince): review these for removal. Some of these indexes
+ # were automatically created by SQLAlchemy migrate and *may* no longer
+ # be in use
+ Index('instance_type_id', instance_type_projects.c.instance_type_id),
+ Index('project_id', dns_domains.c.project_id),
+ Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
+ Index('backend_id', sm_volume.c.backend_id),
+ Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
- Index('instance_id', fixed_ips.c.instance_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
- Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
- Index('project_id', user_project_association.c.project_id),
- Index('network_id', virtual_interfaces.c.network_id),
- Index('instance_id', virtual_interfaces.c.instance_id),
+ Index('address', fixed_ips.c.address),
+ Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
+ Index('instance_uuid', instance_system_metadata.c.instance_uuid),
+ Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
+ Index('snapshot_id', block_device_mapping.c.snapshot_id),
+ Index('usage_id', reservations.c.usage_id),
+ Index('virtual_interfaces_instance_uuid_fkey',
+ virtual_interfaces.c.instance_uuid),
+ Index('volume_id', block_device_mapping.c.volume_id),
+ Index('volume_metadata_volume_id_fkey', volume_metadata.c.volume_id),
]
+ # MySQL specific indexes
if migrate_engine.name == 'mysql':
+ for index in mysql_indexes:
+ index.create(migrate_engine)
+
+ # PostgreSQL specific indexes
+ if migrate_engine.name == 'postgresql':
+ Index('address', fixed_ips.c.address).create()
+
+ # Common indexes
+ if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql':
for index in indexes:
index.create(migrate_engine)
fkeys = [
- [[user_project_role_association.c.user_id,
- user_project_role_association.c.project_id],
- [user_project_association.c.user_id,
- user_project_association.c.project_id],
- 'user_project_role_association_ibfk_1'],
- [[user_project_association.c.user_id],
- [users.c.id], 'user_project_association_ibfk_1'],
- [[user_project_association.c.project_id], [projects.c.id],
- 'user_project_association_ibfk_2'],
- [[instance_info_caches.c.instance_id], [instances.c.uuid],
- 'instance_info_caches_ibfk_1'],
+
+ [[fixed_ips.c.instance_uuid],
+ [instances.c.uuid],
+ 'fixed_ips_instance_uuid_fkey'],
+ [[block_device_mapping.c.instance_uuid],
+ [instances.c.uuid],
+ 'block_device_mapping_instance_uuid_fkey'],
+ [[consoles.c.instance_uuid],
+ [instances.c.uuid],
+ 'consoles_instance_uuid_fkey'],
+ [[instance_info_caches.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_info_caches_instance_uuid_fkey'],
+ [[instance_metadata.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_metadata_instance_uuid_fkey'],
+ [[instance_system_metadata.c.instance_uuid],
+ [instances.c.uuid],
+ 'instance_system_metadata_ibfk_1'],
+ [[instance_type_projects.c.instance_type_id],
+ [instance_types.c.id],
+ 'instance_type_projects_ibfk_1'],
+ [[iscsi_targets.c.volume_id],
+ [volumes.c.id],
+ 'iscsi_targets_volume_id_fkey'],
+ [[reservations.c.usage_id],
+ [quota_usages.c.id],
+ 'reservations_ibfk_1'],
+ [[security_group_instance_association.c.instance_uuid],
+ [instances.c.uuid],
+ 'security_group_instance_association_instance_uuid_fkey'],
+ [[sm_volume.c.backend_id],
+ [sm_backend_config.c.id],
+ 'sm_volume_ibfk_2'],
+ [[sm_volume.c.id],
+ [volumes.c.id],
+ 'sm_volume_id_fkey'],
+ [[virtual_interfaces.c.instance_uuid],
+ [instances.c.uuid],
+ 'virtual_interfaces_instance_uuid_fkey'],
+ [[volume_metadata.c.volume_id],
+ [volumes.c.id],
+ 'volume_metadata_volume_id_fkey'],
+
]
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
- # For MySQL we name our fkeys explicitly so they match Essex
+ # For MySQL we name our fkeys explicitly so they match Folsom
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
+ # PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
- # Hopefully this entire loop to set the charset can go away during
- # the "E" release compaction. See the notes on the dns_domains
- # table above for why this is required vs. setting mysql_charset inline.
if migrate_engine.name == "mysql":
- tables = [
- # tables that are FK parents, must be converted early
- "aggregates", "console_pools", "instance_types", "instances",
- "projects", "security_groups", "sm_backend_config", "sm_flavors",
- "snapshots", "user_project_association", "users", "volume_types",
- "volumes",
- # those that are children and others later
- "agent_builds", "aggregate_hosts", "aggregate_metadata",
- "auth_tokens", "block_device_mapping", "bw_usage_cache",
- "certificates", "compute_nodes", "consoles", "fixed_ips",
- "floating_ips", "instance_actions", "instance_faults",
- "instance_info_caches", "instance_metadata",
- "instance_type_extra_specs", "iscsi_targets", "key_pairs",
- "migrate_version", "migrations", "networks", "provider_fw_rules",
- "quotas", "s3_images", "security_group_instance_association",
- "security_group_rules", "services", "sm_volume",
- "user_project_role_association", "user_role_association",
- "virtual_interfaces", "virtual_storage_arrays", "volume_metadata",
- "volume_type_extra_specs"]
- sql = "SET foreign_key_checks = 0;"
- for table in tables:
- sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
- sql += "SET foreign_key_checks = 1;"
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
- % migrate_engine.url.database
+ # In Folsom we explicitly converted migrate_version to UTF8.
+ sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
+ # Set default DB charset to UTF8.
+ sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
+ migrate_engine.url.database
migrate_engine.execute(sql)
+ # TODO(dprince): due to the upgrade scripts in Folsom the unique key
+ # on instance_uuid is named 'instance_id'. Rename it in Grizzly?
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name='instance_id').create()
+
if migrate_engine.name == "postgresql":
- # NOTE(dprince): Need to rename the leftover zones stuff.
- # https://bugs.launchpad.net/nova/+bug/993667
- sql = "ALTER TABLE cells_id_seq RENAME TO zones_id_seq;"
- sql += "ALTER TABLE ONLY cells DROP CONSTRAINT cells_pkey;"
- sql += "ALTER TABLE ONLY cells ADD CONSTRAINT zones_pkey" \
- " PRIMARY KEY (id);"
-
- # NOTE(dprince): Need to rename the leftover quota_new stuff.
- # https://bugs.launchpad.net/nova/+bug/993669
- sql += "ALTER TABLE quotas_id_seq RENAME TO quotas_new_id_seq;"
- sql += "ALTER TABLE ONLY quotas DROP CONSTRAINT quotas_pkey;"
- sql += "ALTER TABLE ONLY quotas ADD CONSTRAINT quotas_new_pkey" \
- " PRIMARY KEY (id);"
+ # TODO(dprince): Drop this in Grizzly. Snapshots were converted
+ # to UUIDs in Folsom so we no longer require this autocreated
+ # sequence.
+ sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
+ SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
+ ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
+ nextval('snapshots_id_seq'::regclass);"""
+
+ # TODO(dprince): Drop this in Grizzly. Volumes were converted
+ # to UUIDs in Folsom so we no longer require this autocreated
+ # sequence.
+ sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
+ SELECT pg_catalog.setval('volumes_id_seq', 1, false);
+ ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
+ nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
+ # TODO(dprince): due to the upgrade scripts in Folsom the unique key
+ # on instance_uuid is named '.._instance_id_..'. Rename it in Grizzly?
+ UniqueConstraint('instance_uuid', table=instance_info_caches,
+ name='instance_info_caches_instance_id_key').create()
+
# populate initial instance types
_populate_instance_types(instance_types)
def downgrade(migrate_engine):
- raise Exception('Downgrade from Essex is unsupported.')
+ LOG.exception(_('Downgrade from Folsom is unsupported.'))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
new file mode 100644
index 000000000..a208aecf6
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/135_add_node_to_instances.py
@@ -0,0 +1,55 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import and_, String, Column, MetaData, select, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.create_column(node)
+
+ c_nodes = Table('compute_nodes', meta, autoload=True)
+ services = Table('services', meta, autoload=True)
+
+ # set instances.node = compute_nodes.hypervisor_hostname
+ q = select(
+ [instances.c.id, c_nodes.c.hypervisor_hostname],
+ whereclause=and_(
+ instances.c.deleted != True,
+ services.c.deleted != True,
+ services.c.binary == 'nova-compute',
+ c_nodes.c.deleted != True),
+ from_obj=instances.join(services,
+ instances.c.host == services.c.host)
+ .join(c_nodes,
+ services.c.id == c_nodes.c.service_id))
+ for (instance_id, hypervisor_hostname) in q.execute():
+ instances.update().where(instances.c.id == instance_id).\
+ values(node=hypervisor_hostname).\
+ execute()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True)
+ node = Column('node', String(length=255))
+
+ instances.drop_column(node)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
index 0e819a59d..397f8a62b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/119_add_indexes_to_aggregate_metadata.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/136_add_index_to_instances.py
@@ -1,7 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -15,28 +12,30 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Index, MetaData, Table
-from sqlalchemy.exc import IntegrityError
+from sqlalchemy import MetaData, Table, Index
+
+INDEX_NAME = 'instances_host_node_deleted_idx'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on aggregate_metadata_get_item
+ instances = Table('instances', meta, autoload=True)
+
+ # Based on instance_get_all_host_and_node
# from: nova/db/sqlalchemy/api.py
- t = Table('aggregate_metadata', meta, autoload=True)
- i = Index('aggregate_metadata_key_idx', t.c.key)
- try:
- i.create(migrate_engine)
- except IntegrityError:
- pass
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('aggregate_metadata', meta, autoload=True)
- i = Index('aggregate_metadata_key_idx', t.c.key)
- i.drop(migrate_engine)
+ instances = Table('instances', meta, autoload=True)
+
+ index = Index(INDEX_NAME,
+ instances.c.host, instances.c.node, instances.c.deleted)
+ index.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py b/nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py
index 6bc0aed91..1499bd351 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/123_add_indexes_to_dns_domains.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/137_add_indexes_to_migrations.py
@@ -23,11 +23,12 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on _dnsdomain_get
+ t = Table('migrations', meta, autoload=True)
+
+ # Based on migration_get_in_progress_by_host
# from: nova/db/sqlalchemy/api.py
- t = Table('dns_domains', meta, autoload=True)
- i = Index('dns_domains_domain_deleted_idx',
- t.c.domain, t.c.deleted)
+ i = Index('migrations_by_host_and_status_idx', t.c.deleted,
+ t.c.source_compute, t.c.dest_compute, t.c.status)
try:
i.create(migrate_engine)
except IntegrityError:
@@ -38,7 +39,8 @@ def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('dns_domains', meta, autoload=True)
- i = Index('dns_domains_domain_deleted_idx',
- t.c.domain, t.c.deleted)
+ t = Table('migrations', meta, autoload=True)
+
+ i = Index('migrations_by_host_and_status_idx', t.c.deleted,
+ t.c.source_compute, t.c.dest_compute, t.c.status)
i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py
index 549426608..2faae3a8e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/099_add_disabled_instance_types.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_drop_server_name_from_instances.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
# Copyright 2012 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,9 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Boolean, Column, MetaData, Table
-
from nova.openstack.common import log as logging
+from sqlalchemy import Column, String, MetaData, Table
LOG = logging.getLogger(__name__)
@@ -23,18 +24,16 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- disabled = Column('disabled', Boolean)
-
- instance_types.create_column(disabled)
- instance_types.update().values(disabled=False).execute()
+ instances = Table('instances', meta, autoload=True)
+ server_name = instances.columns.server_name
+ server_name.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- instance_types = Table('instance_types', meta, autoload=True)
- disabled = Column('disabled', Boolean)
-
- instance_types.drop_column(disabled)
+ instances = Table('instances', meta, autoload=True)
+ server_name = Column('server_name',
+ String(length=255))
+ instances.create_column(server_name)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql
new file mode 100644
index 000000000..89873ccd4
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_downgrade.sql
@@ -0,0 +1,239 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instances_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instances_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances;
+
+ DROP TABLE instances;
+
+ CREATE TABLE instances (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ server_name VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ CREATE INDEX instances_host_node_deleted_idx
+ ON instances (host, node, deleted);
+
+ CREATE INDEX instances_host_deleted_idx
+ ON instances (host, deleted);
+
+ CREATE INDEX instances_reservation_id_idx
+ ON instances (reservation_id);
+
+ CREATE INDEX instances_terminated_at_launched_at_idx
+ ON instances (terminated_at, launched_at);
+
+ CREATE INDEX instances_uuid_deleted_idx
+ ON instances (uuid, deleted);
+
+ CREATE INDEX instances_task_state_updated_at_idx
+ ON instances (task_state, updated_at);
+
+ INSERT INTO instances
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ hostname,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances_backup;
+
+ DROP TABLE instances_backup;
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql
new file mode 100644
index 000000000..9a72d06c6
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/138_sqlite_upgrade.sql
@@ -0,0 +1,239 @@
+BEGIN TRANSACTION;
+ CREATE TEMPORARY TABLE instances_backup (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ server_name VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO instances_backup
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ server_name,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances;
+
+ DROP TABLE instances;
+
+ CREATE TABLE instances (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN,
+ id INTEGER NOT NULL,
+ internal_id INTEGER,
+ user_id VARCHAR(255),
+ project_id VARCHAR(255),
+ image_ref VARCHAR(255),
+ kernel_id VARCHAR(255),
+ ramdisk_id VARCHAR(255),
+ launch_index INTEGER,
+ key_name VARCHAR(255),
+ key_data TEXT,
+ power_state INTEGER,
+ vm_state VARCHAR(255),
+ memory_mb INTEGER,
+ vcpus INTEGER,
+ hostname VARCHAR(255),
+ host VARCHAR(255),
+ user_data TEXT,
+ reservation_id VARCHAR(255),
+ scheduled_at DATETIME,
+ launched_at DATETIME,
+ terminated_at DATETIME,
+ display_name VARCHAR(255),
+ display_description VARCHAR(255),
+ availability_zone VARCHAR(255),
+ locked BOOLEAN,
+ os_type VARCHAR(255),
+ launched_on TEXT,
+ instance_type_id INTEGER,
+ vm_mode VARCHAR(255),
+ uuid VARCHAR(36),
+ architecture VARCHAR(255),
+ root_device_name VARCHAR(255),
+ access_ip_v4 VARCHAR(255),
+ access_ip_v6 VARCHAR(255),
+ config_drive VARCHAR(255),
+ task_state VARCHAR(255),
+ default_ephemeral_device VARCHAR(255),
+ default_swap_device VARCHAR(255),
+ progress INTEGER,
+ auto_disk_config BOOLEAN,
+ shutdown_terminate BOOLEAN,
+ disable_terminate BOOLEAN,
+ root_gb INTEGER,
+ ephemeral_gb INTEGER,
+ cell_name VARCHAR(255),
+ node VARCHAR(255),
+ PRIMARY KEY (id)
+ );
+
+ CREATE INDEX instances_host_node_deleted_idx
+ ON instances (host, node, deleted);
+
+ CREATE INDEX instances_host_deleted_idx
+ ON instances (host, deleted);
+
+ CREATE INDEX instances_reservation_id_idx
+ ON instances (reservation_id);
+
+ CREATE INDEX instances_terminated_at_launched_at_idx
+ ON instances (terminated_at, launched_at);
+
+ CREATE INDEX instances_uuid_deleted_idx
+ ON instances (uuid, deleted);
+
+ CREATE INDEX instances_task_state_updated_at_idx
+ ON instances (task_state, updated_at);
+
+ INSERT INTO instances
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ internal_id,
+ user_id,
+ project_id,
+ image_ref,
+ kernel_id,
+ ramdisk_id,
+ launch_index,
+ key_name,
+ key_data,
+ power_state,
+ vm_state,
+ memory_mb,
+ vcpus,
+ hostname,
+ host,
+ user_data,
+ reservation_id,
+ scheduled_at,
+ launched_at,
+ terminated_at,
+ display_name,
+ display_description,
+ availability_zone,
+ locked,
+ os_type,
+ launched_on,
+ instance_type_id,
+ vm_mode,
+ uuid,
+ architecture,
+ root_device_name,
+ access_ip_v4,
+ access_ip_v6,
+ config_drive,
+ task_state,
+ default_ephemeral_device,
+ default_swap_device,
+ progress,
+ auto_disk_config,
+ shutdown_terminate,
+ disable_terminate,
+ root_gb,
+ ephemeral_gb,
+ cell_name,
+ node
+ FROM instances_backup;
+
+ DROP TABLE instances_backup;
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py b/nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
index 82517e53a..8a471c057 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/130_add_indexes_to_key_pairs.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/139_add_indexes_to_fixed_ips.py
@@ -23,11 +23,12 @@ def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- # Based on key_pair_get
+ t = Table('fixed_ips', meta, autoload=True)
+
+ # Based on fixed_ip_delete_associate
# from: nova/db/sqlalchemy/api.py
- t = Table('key_pairs', meta, autoload=True)
- i = Index('key_pair_user_id_name_idx',
- t.c.user_id, t.c.name)
+ i = Index('fixed_ips_deleted_allocated_idx',
+ t.c.address, t.c.deleted, t.c.allocated)
try:
i.create(migrate_engine)
except IntegrityError:
@@ -38,7 +39,8 @@ def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
- t = Table('key_pairs', meta, autoload=True)
- i = Index('key_pair_user_id_name_idx',
- t.c.user_id, t.c.name)
+ t = Table('fixed_ips', meta, autoload=True)
+
+ i = Index('fixed_ips_deleted_allocated_idx',
+ t.c.address, t.c.deleted, t.c.allocated)
i.drop(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py b/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
new file mode 100644
index 000000000..18aa206fe
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # NOTE(dprince): Remove unused snapshots/volumes sequences.
+ # These are leftovers from the ID --> UUID conversion for these tables
+ # that occurred in Folsom.
+ if migrate_engine.name == "postgresql":
+ base_query = """SELECT COUNT(*) FROM pg_class c
+ WHERE c.relkind = 'S'
+ AND relname = '%s';"""
+ result = migrate_engine.execute(base_query % "snapshots_id_seq")
+ if result.scalar() > 0:
+ sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
+ migrate_engine.execute(sql)
+
+ result = migrate_engine.execute(base_query % "volumes_id_seq")
+ if result.scalar() > 0:
+ sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
+ migrate_engine.execute(sql)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ if migrate_engine.name == "postgresql":
+ sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
+ SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
+ ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
+ nextval('snapshots_id_seq'::regclass);"""
+
+ sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
+ NO MINVALUE NO MAXVALUE CACHE 1;
+ ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
+ SELECT pg_catalog.setval('volumes_id_seq', 1, false);
+ ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
+ nextval('volumes_id_seq'::regclass);"""
+ migrate_engine.execute(sql)
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index c6c9e9b5f..5f17f96f4 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -19,10 +19,10 @@
import distutils.version as dist_version
import os
+from nova import config
from nova.db import migration
from nova.db.sqlalchemy.session import get_engine
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
@@ -61,8 +61,6 @@ from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
-FLAGS = flags.FLAGS
-
_REPOSITORY = None
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 8022bad12..e4980e57b 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -27,13 +27,13 @@ from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
+from nova import config
from nova.db.sqlalchemy.session import get_session
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
-FLAGS = flags.FLAGS
+CONF = config.CONF
BASE = declarative_base()
@@ -200,7 +200,7 @@ class Instance(BASE, NovaBase):
@property
def name(self):
try:
- base_name = FLAGS.instance_name_template % self.id
+ base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
@@ -214,7 +214,7 @@ class Instance(BASE, NovaBase):
continue
info[key] = self[key]
try:
- base_name = FLAGS.instance_name_template % info
+ base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@@ -228,7 +228,7 @@ class Instance(BASE, NovaBase):
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
- server_name = Column(String(255))
+ hostname = Column(String(255))
# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
@@ -249,8 +249,12 @@ class Instance(BASE, NovaBase):
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
- hostname = Column(String(255))
+ # This is not related to hostname, above. It refers
+ # to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ # To identify the "ComputeNode" which the instance resides in.
+ # This equals to ComputeNode.hypervisor_hostname.
+ node = Column(String(255))
# *not* flavor_id
instance_type_id = Column(Integer)
@@ -352,7 +356,7 @@ class Volume(BASE, NovaBase):
@property
def name(self):
- return FLAGS.volume_name_template % self.id
+ return CONF.volume_name_template % self.id
ec2_id = Column(Integer)
user_id = Column(String(255))
@@ -382,49 +386,6 @@ class Volume(BASE, NovaBase):
volume_type_id = Column(Integer)
-class VolumeMetadata(BASE, NovaBase):
- """Represents a metadata key/value pair for a volume"""
- __tablename__ = 'volume_metadata'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
- volume = relationship(Volume, backref="volume_metadata",
- foreign_keys=volume_id,
- primaryjoin='and_('
- 'VolumeMetadata.volume_id == Volume.id,'
- 'VolumeMetadata.deleted == False)')
-
-
-class VolumeTypes(BASE, NovaBase):
- """Represent possible volume_types of volumes offered"""
- __tablename__ = "volume_types"
- id = Column(Integer, primary_key=True)
- name = Column(String(255))
-
- volumes = relationship(Volume,
- backref=backref('volume_type', uselist=False),
- foreign_keys=id,
- primaryjoin='and_('
- 'Volume.volume_type_id == VolumeTypes.id, '
- 'VolumeTypes.deleted == False)')
-
-
-class VolumeTypeExtraSpecs(BASE, NovaBase):
- """Represents additional specs as key/value pairs for a volume_type"""
- __tablename__ = 'volume_type_extra_specs'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- value = Column(String(255))
- volume_type_id = Column(Integer, ForeignKey('volume_types.id'),
- nullable=False)
- volume_type = relationship(VolumeTypes, backref="extra_specs",
- foreign_keys=volume_type_id,
- primaryjoin='and_('
- 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,'
- 'VolumeTypeExtraSpecs.deleted == False)')
-
-
class Quota(BASE, NovaBase):
"""Represents a single quota override for a project.
@@ -509,11 +470,11 @@ class Snapshot(BASE, NovaBase):
@property
def name(self):
- return FLAGS.snapshot_name_template % self.id
+ return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
- return FLAGS.volume_name_template % self.volume_id
+ return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
@@ -679,6 +640,11 @@ class Migration(BASE, NovaBase):
#TODO(_cerberus_): enum
status = Column(String(255))
+ instance = relationship("Instance", foreign_keys=instance_uuid,
+ primaryjoin='and_(Migration.instance_uuid == '
+ 'Instance.uuid, Instance.deleted == '
+ 'False)')
+
class Network(BASE, NovaBase):
"""Represents a network."""
@@ -959,32 +925,6 @@ class SnapshotIdMapping(BASE, NovaBase):
uuid = Column(String(36), nullable=False)
-class SMFlavors(BASE, NovaBase):
- """Represents a flavor for SM volumes."""
- __tablename__ = 'sm_flavors'
- id = Column(Integer(), primary_key=True)
- label = Column(String(255))
- description = Column(String(255))
-
-
-class SMBackendConf(BASE, NovaBase):
- """Represents the connection to the backend for SM."""
- __tablename__ = 'sm_backend_config'
- id = Column(Integer(), primary_key=True)
- flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False)
- sr_uuid = Column(String(255))
- sr_type = Column(String(255))
- config_params = Column(String(2047))
-
-
-class SMVolume(BASE, NovaBase):
- __tablename__ = 'sm_volume'
- id = Column(String(36), ForeignKey(Volume.id), primary_key=True)
- backend_id = Column(Integer, ForeignKey('sm_backend_config.id'),
- nullable=False)
- vdi_uuid = Column(String(255))
-
-
class InstanceFault(BASE, NovaBase):
__tablename__ = 'instance_faults'
id = Column(Integer(), primary_key=True, autoincrement=True)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 184d279ae..f16b9d161 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -169,12 +169,12 @@ import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
+from nova import config
import nova.exception
-import nova.flags as flags
import nova.openstack.common.log as logging
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
_ENGINE = None
@@ -205,7 +205,7 @@ def get_engine():
"""Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None:
- _ENGINE = create_engine(FLAGS.sql_connection)
+ _ENGINE = create_engine(CONF.sql_connection)
return _ENGINE
@@ -267,21 +267,26 @@ def create_engine(sql_connection):
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
- "pool_recycle": FLAGS.sql_idle_timeout,
+ "pool_recycle": CONF.sql_idle_timeout,
"echo": False,
'convert_unicode': True,
}
+ if CONF.sql_pool_size is not None:
+ engine_args['pool_size'] = CONF.sql_pool_size
+ if CONF.sql_max_overflow is not None:
+ engine_args['max_overflow'] = CONF.sql_max_overflow
+
# Map our SQL debug level to SQLAlchemy's options
- if FLAGS.sql_connection_debug >= 100:
+ if CONF.sql_connection_debug >= 100:
engine_args['echo'] = 'debug'
- elif FLAGS.sql_connection_debug >= 50:
+ elif CONF.sql_connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
engine_args["poolclass"] = NullPool
- if FLAGS.sql_connection == "sqlite://":
+ if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
@@ -292,12 +297,12 @@ def create_engine(sql_connection):
if 'mysql' in connection_dict.drivername:
sqlalchemy.event.listen(engine, 'checkout', ping_listener)
elif 'sqlite' in connection_dict.drivername:
- if not FLAGS.sqlite_synchronous:
+ if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', add_regexp_listener)
- if (FLAGS.sql_connection_trace and
+ if (CONF.sql_connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
import MySQLdb.cursors
_do_query = debug_mysql_do_query()
@@ -309,7 +314,7 @@ def create_engine(sql_connection):
if not is_db_connection_error(e.args[0]):
raise
- remaining = FLAGS.sql_max_retries
+ remaining = CONF.sql_max_retries
if remaining == -1:
remaining = 'infinite'
while True:
@@ -317,7 +322,7 @@ def create_engine(sql_connection):
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
- time.sleep(FLAGS.sql_retry_interval)
+ time.sleep(CONF.sql_retry_interval)
try:
engine.connect()
break
diff --git a/nova/exception.py b/nova/exception.py
index ee5c482a0..ac035efd6 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -217,26 +217,14 @@ class Invalid(NovaException):
code = 400
-class InvalidSnapshot(Invalid):
- message = _("Invalid snapshot") + ": %(reason)s"
-
-
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
-class VolumeAttached(Invalid):
- message = _("Volume %(volume_id)s is still attached, detach volume first.")
-
-
class InvalidKeypair(Invalid):
message = _("Keypair data is invalid")
-class SfJsonEncodeFailure(NovaException):
- message = _("Failed to load data into json format")
-
-
class InvalidRequest(Invalid):
message = _("The request is invalid.")
@@ -245,10 +233,6 @@ class InvalidInput(Invalid):
message = _("Invalid input received") + ": %(reason)s"
-class InvalidVolumeType(Invalid):
- message = _("Invalid volume type") + ": %(reason)s"
-
-
class InvalidVolume(Invalid):
message = _("Invalid volume") + ": %(reason)s"
@@ -428,67 +412,18 @@ class NotFound(NovaException):
code = 404
-class VirtDriverNotFound(NotFound):
- message = _("Could not find driver for compute_driver %(name)s")
-
-
-class PersistentVolumeFileNotFound(NotFound):
- message = _("Volume %(volume_id)s persistence file could not be found.")
-
-
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
-class SfAccountNotFound(NotFound):
- message = _("Unable to locate account %(account_name)s on "
- "Solidfire device")
-
-
-class VolumeMetadataNotFound(NotFound):
- message = _("Volume %(volume_id)s has no metadata with "
- "key %(metadata_key)s.")
-
-
-class VolumeTypeNotFound(NotFound):
- message = _("Volume type %(volume_type_id)s could not be found.")
-
-
-class VolumeTypeNotFoundByName(VolumeTypeNotFound):
- message = _("Volume type with name %(volume_type_name)s "
- "could not be found.")
-
-
-class VolumeTypeExtraSpecsNotFound(NotFound):
- message = _("Volume Type %(volume_type_id)s has no extra specs with "
- "key %(extra_specs_key)s.")
-
-
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
-class VolumeIsBusy(NovaException):
- message = _("deleting volume %(volume_name)s that has snapshot")
-
-
-class SnapshotIsBusy(NovaException):
- message = _("deleting snapshot %(snapshot_name)s that has "
- "dependent volumes")
-
-
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
-class ISCSITargetCreateFailed(NovaException):
- message = _("Failed to create iscsi target for volume %(volume_id)s.")
-
-
-class ISCSITargetRemoveFailed(NovaException):
- message = _("Failed to remove iscsi target for volume %(volume_id)s.")
-
-
class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
@@ -520,6 +455,10 @@ class StorageRepositoryNotFound(NotFound):
message = _("Cannot find SR to read/write VDI.")
+class NetworkDuplicated(NovaException):
+ message = _("Network %(network_id)s is duplicated.")
+
+
class NetworkInUse(NovaException):
message = _("Network %(network_id)s is still in use.")
@@ -557,10 +496,6 @@ class NetworkNotFoundForProject(NotFound):
"is not assigned to the project %(project_id)s.")
-class NetworkHostNotSet(NovaException):
- message = _("Host is not set to the network (%(network_id)s).")
-
-
class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
@@ -900,10 +835,6 @@ class FlavorAccessExists(Duplicate):
"and project %(project_id)s combination.")
-class VolumeTypeExists(Duplicate):
- message = _("Volume Type %(name)s already exists.")
-
-
class InvalidSharedStorage(NovaException):
message = _("%(path)s is not on shared storage: %(reason)s")
@@ -974,14 +905,6 @@ class TooManyInstances(QuotaError):
" but already used %(used)d of %(allowed)d %(resource)s")
-class VolumeSizeTooLarge(QuotaError):
- message = _("Maximum volume size exceeded")
-
-
-class VolumeLimitExceeded(QuotaError):
- message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
-
-
class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
@@ -1036,32 +959,6 @@ class AggregateHostExists(Duplicate):
message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
-class DuplicateSfVolumeNames(Duplicate):
- message = _("Detected more than one volume with name %(vol_name)s")
-
-
-class VolumeTypeCreateFailed(NovaException):
- message = _("Cannot create volume_type with "
- "name %(name)s and specs %(extra_specs)s")
-
-
-class VolumeBackendAPIException(NovaException):
- message = _("Bad or unexpected response from the storage volume "
- "backend API: %(data)s")
-
-
-class NfsException(NovaException):
- message = _("Unknown NFS exception")
-
-
-class NfsNoSharesMounted(NotFound):
- message = _("No mounted NFS shares found")
-
-
-class NfsNoSuitableShareFound(NotFound):
- message = _("There is no share which can host %(volume_size)sG")
-
-
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
@@ -1072,14 +969,6 @@ class InstancePasswordSetFailed(NovaException):
safe = True
-class SolidFireAPIException(NovaException):
- message = _("Bad response from SolidFire API")
-
-
-class SolidFireAPIDataException(SolidFireAPIException):
- message = _("Error in SolidFire API response: data=%(data)s")
-
-
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
diff --git a/nova/filters.py b/nova/filters.py
new file mode 100644
index 000000000..a3339eff8
--- /dev/null
+++ b/nova/filters.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Filter support
+"""
+
+from nova import loadables
+
+
+class BaseFilter(object):
+ """Base class for all filter classes."""
+ def _filter_one(self, obj, filter_properties):
+ """Return True if it passes the filter, False otherwise.
+ Override this in a subclass.
+ """
+ return True
+
+ def filter_all(self, filter_obj_list, filter_properties):
+ """Yield objects that pass the filter.
+
+ Can be overriden in a subclass, if you need to base filtering
+ decisions on all objects. Otherwise, one can just override
+ _filter_one() to filter a single object.
+ """
+ for obj in filter_obj_list:
+ if self._filter_one(obj, filter_properties):
+ yield obj
+
+
+class BaseFilterHandler(loadables.BaseLoader):
+ """Base class to handle loading filter classes.
+
+ This class should be subclassed where one needs to use filters.
+ """
+
+ def get_filtered_objects(self, filter_classes, objs,
+ filter_properties):
+ for filter_cls in filter_classes:
+ objs = filter_cls().filter_all(objs, filter_properties)
+ return list(objs)
diff --git a/nova/flags.py b/nova/flags.py
deleted file mode 100644
index 497d65ca7..000000000
--- a/nova/flags.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command-line flag library.
-
-Emulates gflags by wrapping cfg.ConfigOpts.
-
-The idea is to move fully to cfg eventually, and this wrapper is a
-stepping stone.
-
-"""
-
-import os
-import socket
-import sys
-
-from nova import config
-from nova.openstack.common import cfg
-
-CONF = config.CONF
-FLAGS = CONF
-
-
-def _get_my_ip():
- """
- Returns the actual ip of the local machine.
-
- This code figures out what source address would be used if some traffic
- were to be sent out to some well known address on the Internet. In this
- case, a Google DNS server is used, but the specific address does not
- matter much. No traffic is actually sent.
- """
- try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- csock.connect(('8.8.8.8', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
- except socket.error:
- return "127.0.0.1"
-
-
-core_opts = [
- cfg.StrOpt('sql_connection',
- default='sqlite:///$state_path/$sqlite_db',
- help='The SQLAlchemy connection string used to connect to the '
- 'database'),
- cfg.StrOpt('api_paste_config',
- default="api-paste.ini",
- help='File name for the paste.deploy config for nova-api'),
- cfg.StrOpt('pybasedir',
- default=os.path.abspath(os.path.join(os.path.dirname(__file__),
- '../')),
- help='Directory where the nova python module is installed'),
- cfg.StrOpt('bindir',
- default='$pybasedir/bin',
- help='Directory where nova binaries are installed'),
- cfg.StrOpt('state_path',
- default='$pybasedir',
- help="Top-level directory for maintaining nova's state"),
- ]
-
-debug_opts = [
- cfg.BoolOpt('fake_network',
- default=False,
- help='If passed, use fake network devices and addresses'),
- cfg.IntOpt('sql_connection_debug',
- default=0,
- help='Verbosity of SQL debugging information. 0=None, '
- '100=Everything'),
- cfg.BoolOpt('sql_connection_trace',
- default=False,
- help='Add python stack traces to SQL as comment strings'),
-]
-
-CONF.register_cli_opts(core_opts)
-CONF.register_cli_opts(debug_opts)
-
-global_opts = [
- cfg.StrOpt('my_ip',
- default=_get_my_ip(),
- help='ip address of this host'),
- cfg.ListOpt('region_list',
- default=[],
- help='list of region=fqdn pairs separated by commas'),
- cfg.StrOpt('aws_access_key_id',
- default='admin',
- help='AWS Access ID'),
- cfg.StrOpt('aws_secret_access_key',
- default='admin',
- help='AWS Access Key'),
- cfg.StrOpt('glance_host',
- default='$my_ip',
- help='default glance hostname or ip'),
- cfg.IntOpt('glance_port',
- default=9292,
- help='default glance port'),
- cfg.ListOpt('glance_api_servers',
- default=['$glance_host:$glance_port'],
- help='A list of the glance api servers available to nova. '
- 'Prefix with https:// for ssl-based glance api servers. '
- '([hostname|ip]:port)'),
- cfg.BoolOpt('glance_api_insecure',
- default=False,
- help='Allow to perform insecure SSL (https) requests to '
- 'glance'),
- cfg.IntOpt('glance_num_retries',
- default=0,
- help='Number retries when downloading an image from glance'),
- cfg.IntOpt('s3_port',
- default=3333,
- help='port used when accessing the s3 api'),
- cfg.StrOpt('s3_host',
- default='$my_ip',
- help='hostname or ip for openstack to use when accessing '
- 'the s3 api'),
- cfg.StrOpt('cert_topic',
- default='cert',
- help='the topic cert nodes listen on'),
- cfg.StrOpt('compute_topic',
- default='compute',
- help='the topic compute nodes listen on'),
- cfg.StrOpt('console_topic',
- default='console',
- help='the topic console proxy nodes listen on'),
- cfg.StrOpt('scheduler_topic',
- default='scheduler',
- help='the topic scheduler nodes listen on'),
- cfg.StrOpt('network_topic',
- default='network',
- help='the topic network nodes listen on'),
- cfg.BoolOpt('api_rate_limit',
- default=True,
- help='whether to rate limit the api'),
- cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute', 'metadata'],
- help='a list of APIs to enable by default'),
- cfg.StrOpt('ec2_host',
- default='$my_ip',
- help='the ip of the ec2 api server'),
- cfg.StrOpt('ec2_dmz_host',
- default='$my_ip',
- help='the internal ip of the ec2 api server'),
- cfg.IntOpt('ec2_port',
- default=8773,
- help='the port of the ec2 api server'),
- cfg.StrOpt('ec2_scheme',
- default='http',
- help='the protocol to use when connecting to the ec2 api '
- 'server (http, https)'),
- cfg.StrOpt('ec2_path',
- default='/services/Cloud',
- help='the path prefix used to call the ec2 api server'),
- cfg.ListOpt('osapi_compute_ext_list',
- default=[],
- help='Specify list of extensions to load when using osapi_'
- 'compute_extension option with nova.api.openstack.'
- 'compute.contrib.select_extensions'),
- cfg.MultiStrOpt('osapi_compute_extension',
- default=[
- 'nova.api.openstack.compute.contrib.standard_extensions'
- ],
- help='osapi compute extension to load'),
- cfg.StrOpt('osapi_path',
- default='/v1.1/',
- help='the path prefix used to call the openstack api server'),
- cfg.StrOpt('osapi_compute_link_prefix',
- default=None,
- help='Base URL that will be presented to users in links '
- 'to the OpenStack Compute API'),
- cfg.StrOpt('osapi_glance_link_prefix',
- default=None,
- help='Base URL that will be presented to users in links '
- 'to glance resources'),
- cfg.IntOpt('osapi_max_limit',
- default=1000,
- help='the maximum number of items returned in a single '
- 'response from a collection resource'),
- cfg.StrOpt('metadata_host',
- default='$my_ip',
- help='the ip for the metadata api server'),
- cfg.IntOpt('metadata_port',
- default=8775,
- help='the port for the metadata api port'),
- cfg.StrOpt('default_image',
- default='ami-11111',
- help='default image to use, testing only'),
- cfg.StrOpt('default_instance_type',
- default='m1.small',
- help='default instance type to use, testing only'),
- cfg.StrOpt('null_kernel',
- default='nokernel',
- help='kernel image that indicates not to use a kernel, but to '
- 'use a raw disk image instead'),
- cfg.StrOpt('vpn_image_id',
- default='0',
- help='image id used when starting up a cloudpipe vpn server'),
- cfg.StrOpt('vpn_key_suffix',
- default='-vpn',
- help='Suffix to add to project name for vpn key and secgroups'),
- cfg.StrOpt('sqlite_db',
- default='nova.sqlite',
- help='the filename to use with sqlite'),
- cfg.BoolOpt('sqlite_synchronous',
- default=True,
- help='If passed, use synchronous mode for sqlite'),
- cfg.IntOpt('sql_idle_timeout',
- default=3600,
- help='timeout before idle sql connections are reaped'),
- cfg.IntOpt('sql_max_retries',
- default=10,
- help='maximum db connection retries during startup. '
- '(setting -1 implies an infinite retry count)'),
- cfg.IntOpt('sql_retry_interval',
- default=10,
- help='interval between retries of opening a sql connection'),
- cfg.StrOpt('compute_manager',
- default='nova.compute.manager.ComputeManager',
- help='full class name for the Manager for compute'),
- cfg.StrOpt('console_manager',
- default='nova.console.manager.ConsoleProxyManager',
- help='full class name for the Manager for console proxy'),
- cfg.StrOpt('cert_manager',
- default='nova.cert.manager.CertManager',
- help='full class name for the Manager for cert'),
- cfg.StrOpt('instance_dns_manager',
- default='nova.network.dns_driver.DNSDriver',
- help='full class name for the DNS Manager for instance IPs'),
- cfg.StrOpt('instance_dns_domain',
- default='',
- help='full class name for the DNS Zone for instance IPs'),
- cfg.StrOpt('floating_ip_dns_manager',
- default='nova.network.dns_driver.DNSDriver',
- help='full class name for the DNS Manager for floating IPs'),
- cfg.StrOpt('network_manager',
- default='nova.network.manager.VlanManager',
- help='full class name for the Manager for network'),
- cfg.StrOpt('scheduler_manager',
- default='nova.scheduler.manager.SchedulerManager',
- help='full class name for the Manager for scheduler'),
- cfg.StrOpt('host',
- default=socket.getfqdn(),
- help='Name of this node. This can be an opaque identifier. '
- 'It is not necessarily a hostname, FQDN, or IP address. '
- 'However, the node name must be valid within '
- 'an AMQP key, and if using ZeroMQ, a valid '
- 'hostname, FQDN, or IP address'),
- cfg.StrOpt('node_availability_zone',
- default='nova',
- help='availability zone of this node'),
- cfg.ListOpt('memcached_servers',
- default=None,
- help='Memcached servers or None for in process cache.'),
- cfg.StrOpt('instance_usage_audit_period',
- default='month',
- help='time period to generate instance usages for. '
- 'Time period must be hour, day, month or year'),
- cfg.IntOpt('bandwidth_poll_interval',
- default=600,
- help='interval to pull bandwidth usage info'),
- cfg.BoolOpt('start_guests_on_host_boot',
- default=False,
- help='Whether to restart guests when the host reboots'),
- cfg.BoolOpt('resume_guests_state_on_host_boot',
- default=False,
- help='Whether to start guests that were running before the '
- 'host rebooted'),
- cfg.StrOpt('default_ephemeral_format',
- default=None,
- help='The default format an ephemeral_volume will be '
- 'formatted with on creation.'),
- cfg.StrOpt('rootwrap_config',
- default="/etc/nova/rootwrap.conf",
- help='Path to the rootwrap configuration file to use for '
- 'running commands as root'),
- cfg.StrOpt('network_driver',
- default='nova.network.linux_net',
- help='Driver to use for network creation'),
- cfg.BoolOpt('use_ipv6',
- default=False,
- help='use ipv6'),
- cfg.BoolOpt('enable_instance_password',
- default=True,
- help='Allows use of instance password during '
- 'server creation'),
- cfg.IntOpt('password_length',
- default=12,
- help='Length of generated instance admin passwords'),
- cfg.BoolOpt('monkey_patch',
- default=False,
- help='Whether to log monkey patching'),
- cfg.ListOpt('monkey_patch_modules',
- default=[
- 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
- 'nova.compute.api:nova.notifier.api.notify_decorator'
- ],
- help='List of modules/decorators to monkey patch'),
- cfg.BoolOpt('allow_resize_to_same_host',
- default=False,
- help='Allow destination machine to match source for resize. '
- 'Useful when testing in single-host environments.'),
- cfg.IntOpt('reclaim_instance_interval',
- default=0,
- help='Interval in seconds for reclaiming deleted instances'),
- cfg.IntOpt('zombie_instance_updated_at_window',
- default=172800,
- help='Number of seconds zombie instances are cleaned up.'),
- cfg.IntOpt('service_down_time',
- default=60,
- help='maximum time since last check-in for up service'),
- cfg.StrOpt('default_schedule_zone',
- default=None,
- help='availability zone to use when user doesn\'t specify one'),
- cfg.ListOpt('isolated_images',
- default=[],
- help='Images to run on isolated host'),
- cfg.ListOpt('isolated_hosts',
- default=[],
- help='Host reserved for specific images'),
- cfg.StrOpt('cache_images',
- default='all',
- help='Cache glance images locally. `all` will cache all'
- ' images, `some` will only cache images that have the'
- ' image_property `cache_in_nova=True`, and `none` turns'
- ' off caching entirely'),
- cfg.BoolOpt('use_cow_images',
- default=True,
- help='Whether to use cow images'),
- cfg.StrOpt('compute_api_class',
- default='nova.compute.api.API',
- help='The full class name of the compute API class to use'),
- cfg.StrOpt('network_api_class',
- default='nova.network.api.API',
- help='The full class name of the network API class to use'),
- cfg.StrOpt('volume_api_class',
- default='nova.volume.cinder.API',
- help='The full class name of the volume API class to use'),
- cfg.StrOpt('security_group_handler',
- default='nova.network.sg.NullSecurityGroupHandler',
- help='The full class name of the security group handler class'),
- cfg.StrOpt('default_access_ip_network_name',
- default=None,
- help='Name of network to use to set access ips for instances'),
- cfg.StrOpt('auth_strategy',
- default='noauth',
- help='The strategy to use for auth: noauth or keystone.'),
- cfg.ListOpt('non_inheritable_image_properties',
- default=['cache_in_nova',
- 'bittorrent'],
- help='These are image properties which a snapshot should not'
- ' inherit from an instance'),
- cfg.BoolOpt('defer_iptables_apply',
- default=False,
- help='Whether to batch up the application of IPTables rules'
- ' during a host restart and apply all at the end of the'
- ' init phase'),
- cfg.StrOpt('compute_driver',
- help='Driver to use for controlling virtualization. Options '
- 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
- 'fake.FakeDriver, baremetal.BareMetalDriver, '
- 'vmwareapi.VMWareESXDriver'),
-]
-
-CONF.register_opts(global_opts)
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 0cbc91531..6e0a57773 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -31,7 +31,6 @@ import glanceclient.exc
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/image/s3.py b/nova/image/s3.py
index d252baba0..e6ab8bb15 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -33,7 +33,6 @@ from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py
index 96e30e966..23a6a365f 100644
--- a/nova/ipv6/api.py
+++ b/nova/ipv6/api.py
@@ -15,7 +15,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova import utils
diff --git a/nova/loadables.py b/nova/loadables.py
new file mode 100644
index 000000000..0c930267e
--- /dev/null
+++ b/nova/loadables.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Generic Loadable class support.
+
+Meant to be used by such things as scheduler filters and weights where we
+want to load modules from certain directories and find certain types of
+classes within those modules. Note that this is quite different than
+generic plugins and the pluginmanager code that exists elsewhere.
+
+Usage:
+
+Create a directory with an __init__.py with code such as:
+
+class SomeLoadableClass(object):
+ pass
+
+
+class MyLoader(nova.loadables.BaseLoader)
+ def __init__(self):
+ super(MyLoader, self).__init__(SomeLoadableClass)
+
+If you create modules in the same directory and subclass SomeLoadableClass
+within them, MyLoader().get_all_classes() will return a list
+of such classes.
+"""
+
+import inspect
+import os
+import sys
+
+from nova import exception
+from nova.openstack.common import importutils
+
+
+class BaseLoader(object):
+ def __init__(self, loadable_cls_type):
+ mod = sys.modules[self.__class__.__module__]
+ self.path = mod.__path__[0]
+ self.package = mod.__package__
+ self.loadable_cls_type = loadable_cls_type
+
+ def _is_correct_class(self, obj):
+ """Return whether an object is a class of the correct type and
+ is not prefixed with an underscore.
+ """
+ return (inspect.isclass(obj) and
+ (not obj.__name__.startswith('_')) and
+ issubclass(obj, self.loadable_cls_type))
+
+ def _get_classes_from_module(self, module_name):
+ """Get the classes from a module that match the type we want."""
+ classes = []
+ module = importutils.import_module(module_name)
+ for obj_name in dir(module):
+ # Skip objects that are meant to be private.
+ if obj_name.startswith('_'):
+ continue
+ itm = getattr(module, obj_name)
+ if self._is_correct_class(itm):
+ classes.append(itm)
+ return classes
+
+ def get_all_classes(self):
+ """Get the classes of the type we want from all modules found
+ in the directory that defines this class.
+ """
+ classes = []
+ for dirpath, dirnames, filenames in os.walk(self.path):
+ relpath = os.path.relpath(dirpath, self.path)
+ if relpath == '.':
+ relpkg = ''
+ else:
+ relpkg = '.%s' % '.'.join(relpath.split(os.sep))
+ for fname in filenames:
+ root, ext = os.path.splitext(fname)
+ if ext != '.py' or root == '__init__':
+ continue
+ module_name = "%s%s.%s" % (self.package, relpkg, root)
+ mod_classes = self._get_classes_from_module(module_name)
+ classes.extend(mod_classes)
+ return classes
+
+ def get_matching_classes(self, loadable_class_names):
+ """Get loadable classes from a list of names. Each name can be
+ a full module path or the full path to a method that returns
+ classes to use. The latter behavior is useful to specify a method
+ that returns a list of classes to use in a default case.
+ """
+ classes = []
+ for cls_name in loadable_class_names:
+ obj = importutils.import_class(cls_name)
+ if self._is_correct_class(obj):
+ classes.append(obj)
+ elif inspect.isfunction(obj):
+ # Get list of classes from a function
+ for cls in obj():
+ classes.append(cls)
+ else:
+ error_str = 'Not a class of the correct type'
+ raise exception.ClassNotFound(class_name=cls_name,
+ exception=error_str)
+ return classes
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 5301ee2c5..e3f900ecf 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2012-11-07 00:02+0000\n"
+"POT-Creation-Date: 2012-11-18 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -27,41 +27,41 @@ msgstr ""
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
-#: nova/crypto.py:47
+#: nova/crypto.py:48
msgid "Filename of root CA"
msgstr ""
-#: nova/crypto.py:50
+#: nova/crypto.py:51
msgid "Filename of private key"
msgstr ""
-#: nova/crypto.py:53
+#: nova/crypto.py:54
msgid "Filename of root Certificate Revocation List"
msgstr ""
-#: nova/crypto.py:56
+#: nova/crypto.py:57
msgid "Where we keep our keys"
msgstr ""
-#: nova/crypto.py:59
+#: nova/crypto.py:60
msgid "Where we keep our root CA"
msgstr ""
-#: nova/crypto.py:62
+#: nova/crypto.py:63
msgid "Should we use a CA for each project?"
msgstr ""
-#: nova/crypto.py:66
+#: nova/crypto.py:67
#, python-format
msgid "Subject for certificate for users, %s for project, user, timestamp"
msgstr ""
-#: nova/crypto.py:71
+#: nova/crypto.py:72
#, python-format
msgid "Subject for certificate for projects, %s for project, timestamp"
msgstr ""
-#: nova/crypto.py:301
+#: nova/crypto.py:302
#, python-format
msgid "Flags path: %s"
msgstr ""
@@ -84,257 +84,240 @@ msgstr ""
msgid "DB exception wrapped."
msgstr ""
-#: nova/exception.py:131
+#: nova/exception.py:129
msgid "An unknown exception occurred."
msgstr ""
-#: nova/exception.py:152 nova/openstack/common/rpc/common.py:46
+#: nova/exception.py:150 nova/openstack/common/rpc/common.py:46
msgid "Exception in string format operation"
msgstr ""
-#: nova/exception.py:162
+#: nova/exception.py:160
msgid "Unknown"
msgstr ""
-#: nova/exception.py:179
+#: nova/exception.py:177
msgid "Failed to decrypt text"
msgstr ""
-#: nova/exception.py:183
+#: nova/exception.py:181
msgid "Virtual Interface creation failed"
msgstr ""
-#: nova/exception.py:187
+#: nova/exception.py:185
msgid "5 attempts to create virtual interfacewith unique mac address failed"
msgstr ""
-#: nova/exception.py:192
+#: nova/exception.py:190
#, python-format
msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
msgstr ""
-#: nova/exception.py:197
+#: nova/exception.py:195
msgid "Not authorized."
msgstr ""
-#: nova/exception.py:202
+#: nova/exception.py:200
msgid "User does not have admin privileges"
msgstr ""
-#: nova/exception.py:206
+#: nova/exception.py:204
#, python-format
msgid "Policy doesn't allow %(action)s to be performed."
msgstr ""
-#: nova/exception.py:210
+#: nova/exception.py:208
#, python-format
msgid "Image %(image_id)s is not active."
msgstr ""
-#: nova/exception.py:214
+#: nova/exception.py:212
#, python-format
msgid "Not authorized for image %(image_id)s."
msgstr ""
-#: nova/exception.py:218
+#: nova/exception.py:216
msgid "Unacceptable parameters."
msgstr ""
-#: nova/exception.py:223
-msgid "Invalid snapshot"
-msgstr ""
-
-#: nova/exception.py:227
+#: nova/exception.py:221
#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:231
-#, python-format
-msgid "Volume %(volume_id)s is still attached, detach volume first."
-msgstr ""
-
-#: nova/exception.py:235 nova/api/ec2/cloud.py:390 nova/api/ec2/cloud.py:415
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2238
+#: nova/exception.py:225 nova/api/ec2/cloud.py:390 nova/api/ec2/cloud.py:415
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2250
msgid "Keypair data is invalid"
msgstr ""
-#: nova/exception.py:239
-msgid "Failed to load data into json format"
-msgstr ""
-
-#: nova/exception.py:243
+#: nova/exception.py:229
msgid "The request is invalid."
msgstr ""
-#: nova/exception.py:247
+#: nova/exception.py:233
msgid "Invalid input received"
msgstr ""
-#: nova/exception.py:251
-msgid "Invalid volume type"
-msgstr ""
-
-#: nova/exception.py:255
+#: nova/exception.py:237
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:259 nova/api/openstack/compute/servers.py:1285
+#: nova/exception.py:241 nova/api/openstack/compute/servers.py:1285
#: nova/api/openstack/compute/contrib/admin_actions.py:239
msgid "Invalid metadata"
msgstr ""
-#: nova/exception.py:263
+#: nova/exception.py:245
msgid "Invalid metadata size"
msgstr ""
-#: nova/exception.py:267
+#: nova/exception.py:249
#, python-format
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:271 nova/api/ec2/cloud.py:572
+#: nova/exception.py:253 nova/api/ec2/cloud.py:572
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
-#: nova/exception.py:275
+#: nova/exception.py:257
#, python-format
msgid "Invalid content type %(content_type)s."
msgstr ""
-#: nova/exception.py:279
+#: nova/exception.py:261
#, python-format
msgid "Invalid cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:283
+#: nova/exception.py:265
msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
-#: nova/exception.py:290
+#: nova/exception.py:272
#, python-format
msgid "%(err)s"
msgstr ""
-#: nova/exception.py:294
+#: nova/exception.py:276
#, python-format
msgid ""
"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:"
" %(reason)s."
msgstr ""
-#: nova/exception.py:299
+#: nova/exception.py:281
#, python-format
msgid "Group not valid. Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:303
+#: nova/exception.py:285
msgid "Sort key supplied was not valid."
msgstr ""
-#: nova/exception.py:307
+#: nova/exception.py:289
#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while"
" the instance is in this state."
msgstr ""
-#: nova/exception.py:312
+#: nova/exception.py:294
#, python-format
msgid "Instance %(instance_id)s is not running."
msgstr ""
-#: nova/exception.py:316
+#: nova/exception.py:298
#, python-format
msgid "Instance %(instance_id)s is not in rescue mode"
msgstr ""
-#: nova/exception.py:320
+#: nova/exception.py:302
#, python-format
msgid "Instance %(instance_id)s is not ready"
msgstr ""
-#: nova/exception.py:324
+#: nova/exception.py:306
msgid "Failed to suspend instance"
msgstr ""
-#: nova/exception.py:328
+#: nova/exception.py:310
msgid "Failed to resume server"
msgstr ""
-#: nova/exception.py:332
+#: nova/exception.py:314
msgid "Failed to reboot instance"
msgstr ""
-#: nova/exception.py:336
+#: nova/exception.py:318
msgid "Failed to terminate instance"
msgstr ""
-#: nova/exception.py:340
+#: nova/exception.py:322
msgid "Service is unavailable at this time."
msgstr ""
-#: nova/exception.py:344
+#: nova/exception.py:326
msgid "Insufficient compute resources."
msgstr ""
-#: nova/exception.py:348
+#: nova/exception.py:330
msgid "Compute service is unavailable at this time."
msgstr ""
-#: nova/exception.py:352
+#: nova/exception.py:334
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:357
+#: nova/exception.py:339
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:361
+#: nova/exception.py:343
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:366
+#: nova/exception.py:348
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:371
+#: nova/exception.py:353
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:375
+#: nova/exception.py:357
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:379
+#: nova/exception.py:361
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:383
+#: nova/exception.py:365
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:387
+#: nova/exception.py:369
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:391
+#: nova/exception.py:373
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:397
+#: nova/exception.py:379
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -342,132 +325,75 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:404
+#: nova/exception.py:386
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:408
+#: nova/exception.py:390
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:412
+#: nova/exception.py:394
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:416
+#: nova/exception.py:398
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:420
+#: nova/exception.py:402
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:424
+#: nova/exception.py:406
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:429
+#: nova/exception.py:411
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:434
-#, python-format
-msgid "Could not find driver for compute_driver %(name)s"
-msgstr ""
-
-#: nova/exception.py:438
-#, python-format
-msgid "Volume %(volume_id)s persistence file could not be found."
-msgstr ""
-
-#: nova/exception.py:442
+#: nova/exception.py:416
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:446
-#, python-format
-msgid "Unable to locate account %(account_name)s on Solidfire device"
-msgstr ""
-
-#: nova/exception.py:451
-#, python-format
-msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
-msgstr ""
-
-#: nova/exception.py:456
-#, python-format
-msgid "Volume type %(volume_type_id)s could not be found."
-msgstr ""
-
-#: nova/exception.py:460
-#, python-format
-msgid "Volume type with name %(volume_type_name)s could not be found."
-msgstr ""
-
-#: nova/exception.py:465
-#, python-format
-msgid ""
-"Volume Type %(volume_type_id)s has no extra specs with key "
-"%(extra_specs_key)s."
-msgstr ""
-
-#: nova/exception.py:470
+#: nova/exception.py:420
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:474
-#, python-format
-msgid "deleting volume %(volume_name)s that has snapshot"
-msgstr ""
-
-#: nova/exception.py:478
-#, python-format
-msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
-msgstr ""
-
-#: nova/exception.py:483
+#: nova/exception.py:424
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:487
-#, python-format
-msgid "Failed to create iscsi target for volume %(volume_id)s."
-msgstr ""
-
-#: nova/exception.py:491
-#, python-format
-msgid "Failed to remove iscsi target for volume %(volume_id)s."
-msgstr ""
-
-#: nova/exception.py:495
+#: nova/exception.py:428
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:499
+#: nova/exception.py:432
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:503
+#: nova/exception.py:436
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:507
+#: nova/exception.py:440
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:511
+#: nova/exception.py:444
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -475,980 +401,929 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:518
+#: nova/exception.py:451
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:522
+#: nova/exception.py:455
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:526
+#: nova/exception.py:459
+#, python-format
+msgid "Network %(network_id)s is duplicated."
+msgstr ""
+
+#: nova/exception.py:463
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:530
+#: nova/exception.py:467
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:534
+#: nova/exception.py:471
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:538
+#: nova/exception.py:475
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:542
+#: nova/exception.py:479
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:546
+#: nova/exception.py:483
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:550
+#: nova/exception.py:487
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:554
+#: nova/exception.py:491
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:558
+#: nova/exception.py:495
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:563
-#, python-format
-msgid "Host is not set to the network (%(network_id)s)."
-msgstr ""
-
-#: nova/exception.py:567
+#: nova/exception.py:500
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:504
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:575
+#: nova/exception.py:508
#, python-format
msgid "Port %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:579
+#: nova/exception.py:512
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:583
+#: nova/exception.py:516
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:587
+#: nova/exception.py:520
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:591
+#: nova/exception.py:524
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:596
+#: nova/exception.py:529
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:600
+#: nova/exception.py:533
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:605
+#: nova/exception.py:538
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:610
+#: nova/exception.py:543
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:615
+#: nova/exception.py:548
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:619
+#: nova/exception.py:552
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:623
+#: nova/exception.py:556
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:632
+#: nova/exception.py:565
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:636
+#: nova/exception.py:569
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:573
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:644
+#: nova/exception.py:577
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:648
+#: nova/exception.py:581
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:585
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:589
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:661
+#: nova/exception.py:594
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:666
+#: nova/exception.py:599
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:670
+#: nova/exception.py:603
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:674
+#: nova/exception.py:607
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:678
+#: nova/exception.py:611
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:682
+#: nova/exception.py:615
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:686
+#: nova/exception.py:619
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:690
+#: nova/exception.py:623
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:694
+#: nova/exception.py:627
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:698
+#: nova/exception.py:631
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:702
+#: nova/exception.py:635
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:706
+#: nova/exception.py:639
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:643
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:714
+#: nova/exception.py:647
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:719
+#: nova/exception.py:652
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:723
+#: nova/exception.py:656
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:727
+#: nova/exception.py:660
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:731
+#: nova/exception.py:664
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:735
+#: nova/exception.py:668
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:739
+#: nova/exception.py:672
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:743
+#: nova/exception.py:676
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:747
+#: nova/exception.py:680
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:751
+#: nova/exception.py:684
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:756
+#: nova/exception.py:689
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:760
+#: nova/exception.py:693
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:765
+#: nova/exception.py:698
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:770
+#: nova/exception.py:703
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:774
+#: nova/exception.py:707
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:779
+#: nova/exception.py:712
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:783
+#: nova/exception.py:716
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:789
+#: nova/exception.py:722
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:793
+#: nova/exception.py:726
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:797
+#: nova/exception.py:730
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:802
+#: nova/exception.py:735
#, python-format
msgid "Invalid console type %(console_type)s "
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:739
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:743
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:815
+#: nova/exception.py:748
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:752
#, python-format
msgid "Flavor access not found for %(flavor_id) / %(project_id) combination."
msgstr ""
-#: nova/exception.py:824
+#: nova/exception.py:757
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:761
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:833
+#: nova/exception.py:766
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:770
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:842
+#: nova/exception.py:775
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:847
+#: nova/exception.py:780
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:852
+#: nova/exception.py:785
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:856
+#: nova/exception.py:789
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:860
+#: nova/exception.py:793
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:865
+#: nova/exception.py:798
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:869
+#: nova/exception.py:802
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:873
+#: nova/exception.py:806
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:877
+#: nova/exception.py:810
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:814
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:885
+#: nova/exception.py:818
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:889
+#: nova/exception.py:822
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:893
+#: nova/exception.py:826
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:897
+#: nova/exception.py:830
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:834
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:906
-#, python-format
-msgid "Volume Type %(name)s already exists."
-msgstr ""
-
-#: nova/exception.py:910
+#: nova/exception.py:839
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:914
+#: nova/exception.py:843
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:918
+#: nova/exception.py:847
msgid "Migration error"
msgstr ""
-#: nova/exception.py:922
+#: nova/exception.py:851
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:928
+#: nova/exception.py:857
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:932
+#: nova/exception.py:861
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:936
+#: nova/exception.py:865
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:940
+#: nova/exception.py:869
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:944
+#: nova/exception.py:873
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:948
+#: nova/exception.py:877
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:952
+#: nova/exception.py:881
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:885
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:960
+#: nova/exception.py:889
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:964
+#: nova/exception.py:893
#, python-format
msgid "Host %(host)s is not up or doesn't exist."
msgstr ""
-#: nova/exception.py:968
+#: nova/exception.py:897
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:975
+#: nova/exception.py:904
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:980
-msgid "Maximum volume size exceeded"
-msgstr ""
-
-#: nova/exception.py:984
-#, python-format
-msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
-msgstr ""
-
-#: nova/exception.py:988
+#: nova/exception.py:909
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:992
+#: nova/exception.py:913
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:996
+#: nova/exception.py:917
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1000
+#: nova/exception.py:921
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1004
+#: nova/exception.py:925
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1008
+#: nova/exception.py:929
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1012
+#: nova/exception.py:933
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1016
+#: nova/exception.py:937
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1021
+#: nova/exception.py:942
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1025
+#: nova/exception.py:946
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1029
+#: nova/exception.py:950
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1033
+#: nova/exception.py:954
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1038
+#: nova/exception.py:959
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1042
-#, python-format
-msgid "Detected more than one volume with name %(vol_name)s"
-msgstr ""
-
-#: nova/exception.py:1046
-#, python-format
-msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
-msgstr ""
-
-#: nova/exception.py:1051
-#, python-format
-msgid "Bad or unexpected response from the storage volume backend API: %(data)s"
-msgstr ""
-
-#: nova/exception.py:1056
-msgid "Unknown NFS exception"
-msgstr ""
-
-#: nova/exception.py:1060
-msgid "No mounted NFS shares found"
-msgstr ""
-
-#: nova/exception.py:1064
-#, python-format
-msgid "There is no share which can host %(volume_size)sG"
-msgstr ""
-
-#: nova/exception.py:1068
+#: nova/exception.py:963
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1072
+#: nova/exception.py:967
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1078
-msgid "Bad response from SolidFire API"
-msgstr ""
-
-#: nova/exception.py:1082
-#, python-format
-msgid "Error in SolidFire API response: data=%(data)s"
-msgstr ""
-
-#: nova/exception.py:1086
+#: nova/exception.py:973
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:977
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1094
+#: nova/exception.py:981
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1098
+#: nova/exception.py:985
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1102
+#: nova/exception.py:989
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1106
+#: nova/exception.py:993
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1110
+#: nova/exception.py:997
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1114
+#: nova/exception.py:1001
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1118
+#: nova/exception.py:1005
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1123
+#: nova/exception.py:1010
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1128
+#: nova/exception.py:1015
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1134
+#: nova/exception.py:1021
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1138
+#: nova/exception.py:1025
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1143
+#: nova/exception.py:1030
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1147
+#: nova/exception.py:1034
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
-#: nova/manager.py:166
+#: nova/manager.py:165
#, python-format
msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
msgstr ""
-#: nova/manager.py:172
+#: nova/manager.py:171
#, python-format
msgid "Running periodic task %(full_task_name)s"
msgstr ""
-#: nova/manager.py:182
+#: nova/manager.py:181
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/manager.py:255
+#: nova/manager.py:256
msgid "Notifying Schedulers of capabilities ..."
msgstr ""
-#: nova/notifications.py:112 nova/notifications.py:152
+#: nova/notifications.py:113 nova/notifications.py:153
msgid "Failed to send state update notification"
msgstr ""
-#: nova/policy.py:32
+#: nova/policy.py:33
msgid "JSON file representing policy"
msgstr ""
-#: nova/policy.py:35
+#: nova/policy.py:36
msgid "Rule checked when requested rule is not found"
msgstr ""
-#: nova/quota.py:726
+#: nova/quota.py:721
#, python-format
msgid "Created reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:745
+#: nova/quota.py:740
#, python-format
msgid "Failed to commit reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:763
+#: nova/quota.py:758
#, python-format
msgid "Failed to roll back reservations %(reservations)s"
msgstr ""
-#: nova/service.py:170
-msgid "Full set of FLAGS:"
+#: nova/service.py:173
+msgid "Full set of CONF:"
msgstr ""
-#: nova/service.py:177
+#: nova/service.py:180
#, python-format
msgid "%(flag)s : FLAG SET "
msgstr ""
-#: nova/service.py:187 nova/service.py:285
+#: nova/service.py:190 nova/service.py:288
#, python-format
msgid "Caught %s, exiting"
msgstr ""
-#: nova/service.py:231
+#: nova/service.py:234
msgid "Parent process has died unexpectedly, exiting"
msgstr ""
-#: nova/service.py:267
+#: nova/service.py:270
msgid "Forking too fast, sleeping"
msgstr ""
-#: nova/service.py:290
+#: nova/service.py:293
msgid "Unhandled exception"
msgstr ""
-#: nova/service.py:297
+#: nova/service.py:300
#, python-format
msgid "Started child %d"
msgstr ""
-#: nova/service.py:307
+#: nova/service.py:310
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: nova/service.py:321
+#: nova/service.py:324
#, python-format
msgid "Child %(pid)d killed by signal %(sig)d"
msgstr ""
-#: nova/service.py:324
+#: nova/service.py:327
#, python-format
msgid "Child %(pid)d exited with status %(code)d"
msgstr ""
-#: nova/service.py:327
+#: nova/service.py:330
#, python-format
msgid "pid %d not in child list"
msgstr ""
-#: nova/service.py:347
+#: nova/service.py:350
#, python-format
msgid "Caught %s, stopping children"
msgstr ""
-#: nova/service.py:358
+#: nova/service.py:361
#, python-format
msgid "Waiting on %d children to exit"
msgstr ""
-#: nova/service.py:387
+#: nova/service.py:391
#, python-format
msgid "Starting %(topic)s node (version %(vcs_string)s)"
msgstr ""
-#: nova/service.py:403
+#: nova/service.py:410 nova/openstack/common/rpc/service.py:47
#, python-format
msgid "Creating Consumer connection for Service %s"
msgstr ""
-#: nova/service.py:495
+#: nova/service.py:502
msgid "Service killed that has no database entry"
msgstr ""
-#: nova/service.py:532
+#: nova/service.py:539
msgid "The service database object disappeared, Recreating it."
msgstr ""
-#: nova/service.py:547
+#: nova/service.py:554
msgid "Recovered model server connection!"
msgstr ""
-#: nova/service.py:553
+#: nova/service.py:560
msgid "model server went away"
msgstr ""
-#: nova/service.py:644
+#: nova/service.py:654
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:170
+#: nova/utils.py:169
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:181
+#: nova/utils.py:180
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:197 nova/utils.py:275 nova/virt/powervm/common.py:82
+#: nova/utils.py:204 nova/utils.py:282 nova/virt/powervm/common.py:82
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:210
+#: nova/utils.py:217
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:250
+#: nova/utils.py:257
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:252
+#: nova/utils.py:259
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:256
+#: nova/utils.py:263
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:291
+#: nova/utils.py:298
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:450
+#: nova/utils.py:457
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:453
+#: nova/utils.py:460
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:488
+#: nova/utils.py:495
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:549
+#: nova/utils.py:556
msgid "in looping call"
msgstr ""
-#: nova/utils.py:609
+#: nova/utils.py:616
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:738
+#: nova/utils.py:745
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:767
+#: nova/utils.py:774
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:895
+#: nova/utils.py:898
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:973
+#: nova/utils.py:976
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1091 nova/virt/configdrive.py:151
+#: nova/utils.py:1094 nova/virt/configdrive.py:156
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/wsgi.py:85
+#: nova/wsgi.py:86
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:109
+#: nova/wsgi.py:110
msgid "Stopping WSGI server."
msgstr ""
-#: nova/wsgi.py:127
+#: nova/wsgi.py:128
msgid "WSGI server has stopped."
msgstr ""
-#: nova/wsgi.py:196
+#: nova/wsgi.py:197
msgid "You must implement __call__"
msgstr ""
-#: nova/wsgi.py:382
+#: nova/wsgi.py:383
#, python-format
msgid "Loading app %(name)s from %(path)s"
msgstr ""
@@ -1693,149 +1568,149 @@ msgid "Detach Volume Failed."
msgstr ""
#: nova/api/ec2/cloud.py:865 nova/api/ec2/cloud.py:922
-#: nova/api/ec2/cloud.py:1459 nova/api/ec2/cloud.py:1474
+#: nova/api/ec2/cloud.py:1458 nova/api/ec2/cloud.py:1473
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:988
+#: nova/api/ec2/cloud.py:987
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1139
+#: nova/api/ec2/cloud.py:1138
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1143
+#: nova/api/ec2/cloud.py:1142
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1147
+#: nova/api/ec2/cloud.py:1146
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1152
+#: nova/api/ec2/cloud.py:1151
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1155
+#: nova/api/ec2/cloud.py:1154
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1163
+#: nova/api/ec2/cloud.py:1162
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1171
+#: nova/api/ec2/cloud.py:1170
#: nova/api/openstack/compute/contrib/floating_ips.py:257
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1180
+#: nova/api/ec2/cloud.py:1179
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1183
+#: nova/api/ec2/cloud.py:1182
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1186
+#: nova/api/ec2/cloud.py:1185
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1194
+#: nova/api/ec2/cloud.py:1193
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1199
+#: nova/api/ec2/cloud.py:1198
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1202
+#: nova/api/ec2/cloud.py:1201
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1229
+#: nova/api/ec2/cloud.py:1228
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1261
+#: nova/api/ec2/cloud.py:1260
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1271
+#: nova/api/ec2/cloud.py:1270
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1280
+#: nova/api/ec2/cloud.py:1279
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1289
+#: nova/api/ec2/cloud.py:1288
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1380
+#: nova/api/ec2/cloud.py:1379
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1396
+#: nova/api/ec2/cloud.py:1395
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1415
+#: nova/api/ec2/cloud.py:1414
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1477
+#: nova/api/ec2/cloud.py:1476
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1479
+#: nova/api/ec2/cloud.py:1478
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1481
+#: nova/api/ec2/cloud.py:1480
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1483
+#: nova/api/ec2/cloud.py:1482
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1496
+#: nova/api/ec2/cloud.py:1495
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1525
+#: nova/api/ec2/cloud.py:1524
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1555
+#: nova/api/ec2/cloud.py:1554
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1573
+#: nova/api/ec2/cloud.py:1572
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1606
+#: nova/api/ec2/cloud.py:1605
msgid "Invalid CIDR"
msgstr ""
@@ -1862,23 +1737,23 @@ msgstr ""
msgid "%(url)s returned with HTTP %(status)d"
msgstr ""
-#: nova/api/openstack/__init__.py:126
+#: nova/api/openstack/__init__.py:135
msgid "Must specify an ExtensionManager class"
msgstr ""
-#: nova/api/openstack/__init__.py:137
+#: nova/api/openstack/__init__.py:146
#, python-format
msgid "Extended resource: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:171
+#: nova/api/openstack/__init__.py:180
#, python-format
msgid ""
"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
"resource"
msgstr ""
-#: nova/api/openstack/__init__.py:176
+#: nova/api/openstack/__init__.py:185
#, python-format
msgid "Extension %(ext_name)s extending resource: %(collection)s"
msgstr ""
@@ -2158,7 +2033,7 @@ msgid ""
"%(unit_string)s."
msgstr ""
-#: nova/api/openstack/compute/limits.py:271
+#: nova/api/openstack/compute/limits.py:272
msgid "This request was rate-limited."
msgstr ""
@@ -2560,6 +2435,16 @@ msgstr ""
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr ""
+#: nova/api/openstack/compute/contrib/fixed_ips.py:42
+#, python-format
+msgid "Fixed IP %s has been deleted"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/fixed_ips.py:74
+#, python-format
+msgid "Fixed IP %s not found"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/flavor_access.py:80
#: nova/api/openstack/compute/contrib/flavor_access.py:104
msgid "Flavor not found."
@@ -2632,6 +2517,10 @@ msgstr ""
msgid "Error. Unable to associate floating ip"
msgstr ""
+#: nova/api/openstack/compute/contrib/fping.py:58
+msgid "fping utility is not found."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/hosts.py:122
#, python-format
msgid "Host '%s' could not be found."
@@ -2850,7 +2739,7 @@ msgstr ""
msgid "Netmask to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:109
+#: nova/cloudpipe/pipelib.py:106
#, python-format
msgid "Launching VPN for %s"
msgstr ""
@@ -2863,1008 +2752,960 @@ msgstr ""
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/compute/api.py:224
+#: nova/compute/api.py:223
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:231
+#: nova/compute/api.py:230
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:240
+#: nova/compute/api.py:239
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:260
+#: nova/compute/api.py:259
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:270
+#: nova/compute/api.py:269
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:274
+#: nova/compute/api.py:273
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:278
+#: nova/compute/api.py:277
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:502
+#: nova/compute/api.py:501
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:574
+#: nova/compute/api.py:573
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:601
+#: nova/compute/api.py:600
#, python-format
msgid "block_device_mapping %s"
msgstr ""
#: nova/compute/api.py:833
-msgid "Going to try to soft delete instance"
+msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:850
-msgid "No host for instance, deleting immediately"
+#: nova/compute/api.py:928
+msgid "host for instance is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:950
-msgid "host for instance is down, deleting from database"
+#: nova/compute/api.py:972
+msgid "Going to try to soft delete instance"
msgstr ""
#: nova/compute/api.py:994
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1034
+#: nova/compute/api.py:1041
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1048
+#: nova/compute/api.py:1055
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1112
+#: nova/compute/api.py:1119
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1247
+#: nova/compute/api.py:1254
#, python-format
msgid "Image type not recognized %s"
msgstr ""
-#: nova/compute/api.py:1356
+#: nova/compute/api.py:1363
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1678
+#: nova/compute/api.py:1685
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1687
+#: nova/compute/api.py:1694
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1729
+#: nova/compute/api.py:1736
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
msgstr ""
-#: nova/compute/api.py:1901
+#: nova/compute/api.py:1912
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1909
+#: nova/compute/api.py:1920
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:1977
+#: nova/compute/api.py:1988
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2062
+#: nova/compute/api.py:2073
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2211
+#: nova/compute/api.py:2223
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2215
+#: nova/compute/api.py:2227
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2316
+#: nova/compute/api.py:2328
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2319
+#: nova/compute/api.py:2331
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2327
+#: nova/compute/api.py:2339
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2333
+#: nova/compute/api.py:2345
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2353
+#: nova/compute/api.py:2365
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2356
+#: nova/compute/api.py:2368
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2363
+#: nova/compute/api.py:2375
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2428
+#: nova/compute/api.py:2440
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2436
+#: nova/compute/api.py:2448
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2439
+#: nova/compute/api.py:2451
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2696
+#: nova/compute/api.py:2708
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2705
+#: nova/compute/api.py:2717
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2708
+#: nova/compute/api.py:2720
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2719
+#: nova/compute/api.py:2731
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
-#: nova/compute/instance_types.py:63
+#: nova/compute/claims.py:94 nova/compute/claims.py:218
+#, python-format
+msgid "Aborting claim: %s"
+msgstr ""
+
+#: nova/compute/claims.py:116
+#, python-format
+msgid ""
+"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
+"%(vcpus)d"
+msgstr ""
+
+#: nova/compute/claims.py:128
+msgid "Claim successful"
+msgstr ""
+
+#: nova/compute/claims.py:130
+msgid "Claim failed"
+msgstr ""
+
+#: nova/compute/claims.py:135
+msgid "Memory"
+msgstr ""
+
+#: nova/compute/claims.py:144
+msgid "Disk"
+msgstr ""
+
+#: nova/compute/claims.py:153
+msgid "CPU"
+msgstr ""
+
+#: nova/compute/claims.py:165
+#, python-format
+msgid "Total %(type_)s: %(total)d %(unit)s, used: %(used)d %(unit)s"
+msgstr ""
+
+#: nova/compute/claims.py:170
+#, python-format
+msgid "%(type_)s limit not specified, defaulting to unlimited"
+msgstr ""
+
+#: nova/compute/claims.py:177
+#, python-format
+msgid "%(type_)s limit: %(limit)d %(unit)s, free: %(free)d %(unit)s"
+msgstr ""
+
+#: nova/compute/claims.py:184
+#, python-format
+msgid ""
+"Unable to claim resources. Free %(type_)s %(free)d %(unit)s < requested "
+"%(requested)d %(unit)s"
+msgstr ""
+
+#: nova/compute/instance_types.py:64
msgid "names can only contain [a-zA-Z0-9_.- ]"
msgstr ""
-#: nova/compute/instance_types.py:72 nova/compute/instance_types.py:80
+#: nova/compute/instance_types.py:73 nova/compute/instance_types.py:81
msgid "create arguments must be positive integers"
msgstr ""
-#: nova/compute/instance_types.py:94
+#: nova/compute/instance_types.py:91
+msgid "is_public must be a boolean"
+msgstr ""
+
+#: nova/compute/instance_types.py:98
#, python-format
msgid "DB error: %s"
msgstr ""
-#: nova/compute/instance_types.py:104
+#: nova/compute/instance_types.py:108
#, python-format
msgid "Instance type %s not found for deletion"
msgstr ""
-#: nova/compute/manager.py:163
+#: nova/compute/manager.py:164
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:243
+#: nova/compute/manager.py:278
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/compute/manager.py:248
+#: nova/compute/manager.py:283
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/compute/manager.py:255
+#: nova/compute/manager.py:290
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
-#: nova/compute/manager.py:290
+#: nova/compute/manager.py:334
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:318
+#: nova/compute/manager.py:362
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:332
+#: nova/compute/manager.py:376
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:346
+#: nova/compute/manager.py:390
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:356
+#: nova/compute/manager.py:400
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:375
+#: nova/compute/manager.py:419
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:448
+#: nova/compute/manager.py:496
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:541
+#: nova/compute/manager.py:589
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:564
+#: nova/compute/manager.py:612
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:597 nova/compute/manager.py:1743
+#: nova/compute/manager.py:645 nova/compute/manager.py:1819
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:614
+#: nova/compute/manager.py:662
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:619
+#: nova/compute/manager.py:667
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:625
+#: nova/compute/manager.py:673
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:648
+#: nova/compute/manager.py:696
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:679
+#: nova/compute/manager.py:727
msgid "Instance has already been created"
msgstr ""
-#: nova/compute/manager.py:722
+#: nova/compute/manager.py:770
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:728
+#: nova/compute/manager.py:776
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:738
+#: nova/compute/manager.py:786
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:759
+#: nova/compute/manager.py:807
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:763
+#: nova/compute/manager.py:811
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:776
+#: nova/compute/manager.py:824
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:794
+#: nova/compute/manager.py:842
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:818
+#: nova/compute/manager.py:866
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:890
+#: nova/compute/manager.py:938
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:921
+#: nova/compute/manager.py:969
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:924
+#: nova/compute/manager.py:972
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:931
+#: nova/compute/manager.py:979
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:955
+#: nova/compute/manager.py:1003
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:995 nova/compute/manager.py:1912
-#: nova/compute/manager.py:3112
+#: nova/compute/manager.py:1043 nova/compute/manager.py:1990
+#: nova/compute/manager.py:3199
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1128
+#: nova/compute/manager.py:1176
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1207
+#: nova/compute/manager.py:1255
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1231
+#: nova/compute/manager.py:1279
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1240
+#: nova/compute/manager.py:1288
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1277
+#: nova/compute/manager.py:1325
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1283
+#: nova/compute/manager.py:1331
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1336
+#: nova/compute/manager.py:1384
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1343
+#: nova/compute/manager.py:1391
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1348
+#: nova/compute/manager.py:1396
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1379
+#: nova/compute/manager.py:1427
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1386
+#: nova/compute/manager.py:1434
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1396
+#: nova/compute/manager.py:1444
msgid "set_admin_password is not implemented by this driver."
msgstr ""
-#: nova/compute/manager.py:1412
+#: nova/compute/manager.py:1460
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1420
+#: nova/compute/manager.py:1468
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1435
+#: nova/compute/manager.py:1483
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1439
+#: nova/compute/manager.py:1487
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1452
+#: nova/compute/manager.py:1508
+msgid ""
+"Unable to find a different image to use for rescue VM, using instance's "
+"current image"
+msgstr ""
+
+#: nova/compute/manager.py:1521
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1479
+#: nova/compute/manager.py:1552
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1500
+#: nova/compute/manager.py:1573
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1680
+#: nova/compute/manager.py:1746
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1699
+#: nova/compute/manager.py:1755
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:1909
+#: nova/compute/manager.py:1987
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:1965
+#: nova/compute/manager.py:2043
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:1982
+#: nova/compute/manager.py:2060
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2020
+#: nova/compute/manager.py:2098
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2050
+#: nova/compute/manager.py:2128
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2066
+#: nova/compute/manager.py:2144
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2071
+#: nova/compute/manager.py:2149
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2074
+#: nova/compute/manager.py:2152
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2091
+#: nova/compute/manager.py:2169
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2116
+#: nova/compute/manager.py:2194
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2144
+#: nova/compute/manager.py:2222
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2188
+#: nova/compute/manager.py:2266
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2197
+#: nova/compute/manager.py:2275
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2212
+#: nova/compute/manager.py:2290
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2241
+#: nova/compute/manager.py:2319
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2245
+#: nova/compute/manager.py:2323
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2258
+#: nova/compute/manager.py:2336
#, python-format
msgid "Faild to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2302
+#: nova/compute/manager.py:2380
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2362
+#: nova/compute/manager.py:2440
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2422
+#: nova/compute/manager.py:2500
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2448
+#: nova/compute/manager.py:2526
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2481
+#: nova/compute/manager.py:2559
msgid "No floating_ip found"
msgstr ""
-#: nova/compute/manager.py:2489
+#: nova/compute/manager.py:2567
msgid "No floating_ip found."
msgstr ""
-#: nova/compute/manager.py:2491
+#: nova/compute/manager.py:2569
#, python-format
msgid ""
"Live migration: Unexpected error: cannot inherit floating ip.\n"
"%(e)s"
msgstr ""
-#: nova/compute/manager.py:2517
+#: nova/compute/manager.py:2595
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2519
+#: nova/compute/manager.py:2597
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2533
+#: nova/compute/manager.py:2611
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2664
+#: nova/compute/manager.py:2742
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2693
+#: nova/compute/manager.py:2771
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2698
+#: nova/compute/manager.py:2776
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2707
+#: nova/compute/manager.py:2785
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2714
+#: nova/compute/manager.py:2792
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2718
+#: nova/compute/manager.py:2796
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2725
+#: nova/compute/manager.py:2803
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2733
+#: nova/compute/manager.py:2811
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2750
+#: nova/compute/manager.py:2828
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2768
+#: nova/compute/manager.py:2846
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2791
+#: nova/compute/manager.py:2869
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:2856
+#: nova/compute/manager.py:2935
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:2882
+#: nova/compute/manager.py:2963
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2888 nova/compute/manager.py:2926
+#: nova/compute/manager.py:2969 nova/compute/manager.py:3007
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:2913
+#: nova/compute/manager.py:2994
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:2950
+#: nova/compute/manager.py:3031
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2962 nova/compute/manager.py:2973
-#: nova/compute/manager.py:2987
+#: nova/compute/manager.py:3043 nova/compute/manager.py:3054
+#: nova/compute/manager.py:3068
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:2967
+#: nova/compute/manager.py:3048
msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2980
+#: nova/compute/manager.py:3061
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:2996
+#: nova/compute/manager.py:3077
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3004
-msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
+#: nova/compute/manager.py:3085
+msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3017
+#: nova/compute/manager.py:3098
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3066
+#: nova/compute/manager.py:3153
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3073
+#: nova/compute/manager.py:3160
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3080
+#: nova/compute/manager.py:3167
#, python-format
-msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
+msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/resource_tracker.py:151
+#: nova/compute/resource_tracker.py:93
msgid ""
"Host field should be not be set on the instance until resources have been"
" claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:168
-#, python-format
-msgid ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
+#: nova/compute/resource_tracker.py:236
+msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:210
-#, python-format
-msgid ""
-"Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
-" MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:219
-msgid "Memory limit not specified, defaulting to unlimited"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:225
-#, python-format
-msgid "Memory limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:232
-#, python-format
-msgid ""
-"Unable to claim resources. Free memory %(free_ram_mb)d MB < requested "
-"memory %(memory_mb)d MB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:241
-#, python-format
-msgid ""
-"Total disk: %(total_disk)d GB, used: %(used_disk)d GB, free: "
-"%(free_disk)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:250
-msgid "Disk limit not specified, defaulting to unlimited"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:256
-#, python-format
-msgid "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:262
-#, python-format
-msgid ""
-"Unable to claim resources. Free disk %(free_disk_gb)d GB < requested "
-"disk %(disk_gb)d GB"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:273
-#, python-format
-msgid "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:280
-msgid "VCPU limit not specified, defaulting to unlimited"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:284
-#, python-format
-msgid "CPU limit: %(vcpu_limit)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:291
-#, python-format
-msgid ""
-"Unable to claim resources. Free CPU %(free_vcpus)d < requested CPU "
-"%(vcpus)d"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:310
-#, python-format
-msgid "Finishing claim: %s"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:325
-#, python-format
-msgid "Aborting claim: %s"
-msgstr ""
-
-#: nova/compute/resource_tracker.py:363
+#: nova/compute/resource_tracker.py:240
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:401
+#: nova/compute/resource_tracker.py:285
#, python-format
msgid "Compute_service record created for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:406
+#: nova/compute/resource_tracker.py:290
#, python-format
msgid "Compute_service record updated for %s "
msgstr ""
-#: nova/compute/resource_tracker.py:425
+#: nova/compute/resource_tracker.py:303
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/resource_tracker.py:435
+#: nova/compute/resource_tracker.py:313
#, python-format
msgid "Hypervisor: free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:436
+#: nova/compute/resource_tracker.py:314
#, python-format
msgid "Hypervisor: free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:441
+#: nova/compute/resource_tracker.py:319
#, python-format
msgid "Hypervisor: free VCPUs: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:443
+#: nova/compute/resource_tracker.py:321
msgid "Hypervisor: VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:450
+#: nova/compute/resource_tracker.py:328
#, python-format
msgid "Free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:451
+#: nova/compute/resource_tracker.py:329
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:456
+#: nova/compute/resource_tracker.py:334
#, python-format
msgid "Free VCPUS: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:458
+#: nova/compute/resource_tracker.py:336
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:533
+#: nova/compute/resource_tracker.py:430
+msgid "Instance not resizing, skipping migration."
+msgstr ""
+
+#: nova/compute/resource_tracker.py:498
#, python-format
msgid "Missing keys: %s"
msgstr ""
-#: nova/compute/rpcapi.py:44
+#: nova/compute/rpcapi.py:43
msgid "No compute host specified"
msgstr ""
-#: nova/compute/rpcapi.py:47
+#: nova/compute/rpcapi.py:46
#, python-format
msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:87
+#: nova/compute/utils.py:92
#, python-format
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/console/manager.py:81 nova/console/vmrc_manager.py:63
+#: nova/console/manager.py:82 nova/console/vmrc_manager.py:64
msgid "Adding console"
msgstr ""
-#: nova/console/manager.py:102 nova/console/vmrc_manager.py:115
+#: nova/console/manager.py:103 nova/console/vmrc_manager.py:116
#, python-format
msgid "Tried to remove non-existent console %(console_id)s."
msgstr ""
-#: nova/console/vmrc_manager.py:118
+#: nova/console/vmrc_manager.py:119
#, python-format
msgid "Removing console %(console_id)s."
msgstr ""
-#: nova/console/xvp.py:98
+#: nova/console/xvp.py:99
msgid "Rebuilding xvp conf"
msgstr ""
-#: nova/console/xvp.py:116
+#: nova/console/xvp.py:117
#, python-format
msgid "Re-wrote %s"
msgstr ""
-#: nova/console/xvp.py:121
+#: nova/console/xvp.py:122
msgid "Stopping xvp"
msgstr ""
-#: nova/console/xvp.py:134
+#: nova/console/xvp.py:135
msgid "Starting xvp"
msgstr ""
-#: nova/console/xvp.py:141
+#: nova/console/xvp.py:142
#, python-format
msgid "Error starting xvp: %s"
msgstr ""
-#: nova/console/xvp.py:144
+#: nova/console/xvp.py:145
msgid "Restarting xvp"
msgstr ""
-#: nova/console/xvp.py:146
+#: nova/console/xvp.py:147
msgid "xvp not running..."
msgstr ""
-#: nova/consoleauth/manager.py:70
+#: nova/consoleauth/manager.py:71
#, python-format
msgid "Received Token: %(token)s, %(token_dict)s)"
msgstr ""
-#: nova/consoleauth/manager.py:75
+#: nova/consoleauth/manager.py:76
#, python-format
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/sqlalchemy/api.py:208
+#: nova/db/sqlalchemy/api.py:181 nova/virt/baremetal/db/sqlalchemy/api.py:67
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2792
+#: nova/db/sqlalchemy/api.py:2704
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4693
-msgid "Backend exists"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4713 nova/db/sqlalchemy/api.py:4739
-#, python-format
-msgid "No backend config with id %(sm_backend_id)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4751
-#, python-format
-msgid "No backend config with sr uuid %(sr_uuid)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4785
-msgid "Flavor exists"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4800
-#, python-format
-msgid "%(sm_flavor_id) flavor not found"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4819
-#, python-format
-msgid "No sm_flavor called %(sm_flavor_id)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4836
-#, python-format
-msgid "No sm_flavor called %(sm_flavor_label)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/api.py:4874
-#, python-format
-msgid "No sm_volume with id %(volume_id)s"
-msgstr ""
-
-#: nova/db/sqlalchemy/migration.py:74
+#: nova/db/sqlalchemy/migration.py:72
+#: nova/virt/baremetal/db/sqlalchemy/migration.py:69
msgid "version should be an integer"
msgstr ""
-#: nova/db/sqlalchemy/migration.py:101
+#: nova/db/sqlalchemy/migration.py:99
+#: nova/virt/baremetal/db/sqlalchemy/migration.py:96
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/session.py:316
+#: nova/db/sqlalchemy/session.py:322
#, python-format
msgid "SQL connection failed. %s attempts left."
msgstr ""
@@ -3938,49 +3779,49 @@ msgstr ""
msgid "user_quotas table not dropped"
msgstr ""
-#: nova/image/glance.py:143
+#: nova/image/glance.py:144
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
"%(extra)s."
msgstr ""
-#: nova/image/s3.py:311
+#: nova/image/s3.py:312
#, python-format
msgid "Failed to download %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:328
+#: nova/image/s3.py:329
#, python-format
msgid "Failed to decrypt %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:338
+#: nova/image/s3.py:339
#, python-format
msgid "Failed to untar %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:348
+#: nova/image/s3.py:349
#, python-format
msgid "Failed to upload %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:372
+#: nova/image/s3.py:373
#, python-format
msgid "Failed to decrypt private key: %s"
msgstr ""
-#: nova/image/s3.py:379
+#: nova/image/s3.py:380
#, python-format
msgid "Failed to decrypt initialization vector: %s"
msgstr ""
-#: nova/image/s3.py:390
+#: nova/image/s3.py:391
#, python-format
msgid "Failed to decrypt image file %(image_file)s: %(err)s"
msgstr ""
-#: nova/image/s3.py:402
+#: nova/image/s3.py:403
msgid "Unsafe filenames in image"
msgstr ""
@@ -3999,178 +3840,183 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/network/api.py:54
+#: nova/network/api.py:52
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
-#: nova/network/api.py:81
+#: nova/network/api.py:79
#, python-format
msgid "args: %s"
msgstr ""
-#: nova/network/api.py:82
+#: nova/network/api.py:80
#, python-format
msgid "kwargs: %s"
msgstr ""
-#: nova/network/api.py:170
+#: nova/network/api.py:171
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
-#: nova/network/ldapdns.py:317
+#: nova/network/ldapdns.py:318
msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/linux_net.py:179
+#: nova/network/linux_net.py:180
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:214
+#: nova/network/linux_net.py:215
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:239
+#: nova/network/linux_net.py:240
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:374
+#: nova/network/linux_net.py:375
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:580
+#: nova/network/linux_net.py:581
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:790
+#: nova/network/linux_net.py:792
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:830
+#: nova/network/linux_net.py:832
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:832
+#: nova/network/linux_net.py:834
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:895
+#: nova/network/linux_net.py:897
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:897
+#: nova/network/linux_net.py:899
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1127
+#: nova/network/linux_net.py:1129
#, python-format
msgid "Starting VLAN inteface %s"
msgstr ""
-#: nova/network/linux_net.py:1163
+#: nova/network/linux_net.py:1168
#, python-format
-msgid "Starting Bridge interface for %s"
+msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1206
+#: nova/network/linux_net.py:1180
+#, python-format
+msgid "Adding interface %(interface)s to bridge %(bridge)s"
+msgstr ""
+
+#: nova/network/linux_net.py:1213
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1307
+#: nova/network/linux_net.py:1315
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1315
+#: nova/network/linux_net.py:1323
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1334
+#: nova/network/linux_net.py:1342
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1336
+#: nova/network/linux_net.py:1344
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:285
+#: nova/network/manager.py:286
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/manager.py:294 nova/network/manager.py:553
+#: nova/network/manager.py:295 nova/network/manager.py:554
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/manager.py:309
+#: nova/network/manager.py:310
#, python-format
msgid "floating IP allocation for instance |%s|"
msgstr ""
-#: nova/network/manager.py:373
+#: nova/network/manager.py:374
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/manager.py:391
+#: nova/network/manager.py:392
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/manager.py:395
+#: nova/network/manager.py:396
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/manager.py:416
+#: nova/network/manager.py:417
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/manager.py:477
+#: nova/network/manager.py:478
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/manager.py:674
+#: nova/network/manager.py:677
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:681
+#: nova/network/manager.py:684
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/manager.py:707
+#: nova/network/manager.py:714
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/manager.py:715
+#: nova/network/manager.py:722
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/manager.py:762
+#: nova/network/manager.py:769
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4178,39 +4024,39 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/manager.py:808
+#: nova/network/manager.py:815
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/manager.py:818
+#: nova/network/manager.py:825
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/manager.py:932
+#: nova/network/manager.py:939
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:936
+#: nova/network/manager.py:943
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1050
+#: nova/network/manager.py:1057
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1055
+#: nova/network/manager.py:1062
#, python-format
msgid "networks retrieved for instance: |%(networks)s|"
msgstr ""
-#: nova/network/manager.py:1085
+#: nova/network/manager.py:1092
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1308
+#: nova/network/manager.py:1319
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4218,96 +4064,96 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1389
+#: nova/network/manager.py:1400
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1410
+#: nova/network/manager.py:1421
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1414
+#: nova/network/manager.py:1425
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1422
+#: nova/network/manager.py:1433
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1427
+#: nova/network/manager.py:1438
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1431
+#: nova/network/manager.py:1442
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1434
+#: nova/network/manager.py:1445
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1453
+#: nova/network/manager.py:1464
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1477
+#: nova/network/manager.py:1488
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1497
+#: nova/network/manager.py:1508
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1578
+#: nova/network/manager.py:1589
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1581
+#: nova/network/manager.py:1592
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1592
+#: nova/network/manager.py:1603
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1651
+#: nova/network/manager.py:1660
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1671
+#: nova/network/manager.py:1680
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2137
+#: nova/network/manager.py:2150
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2144
+#: nova/network/manager.py:2157
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s. Network size"
" is %(network_size)s"
msgstr ""
-#: nova/network/minidns.py:65
+#: nova/network/minidns.py:68
msgid "This driver only supports type 'a'"
msgstr ""
@@ -4315,80 +4161,80 @@ msgstr ""
msgid "v4 subnets are required for legacy nw_info"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:75
+#: nova/network/quantum/nova_ipam_lib.py:72
msgid "Error creating network entry"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:90
+#: nova/network/quantum/nova_ipam_lib.py:87
#, python-format
msgid "No network with net_id = %s"
msgstr ""
-#: nova/network/quantum/nova_ipam_lib.py:256
+#: nova/network/quantum/nova_ipam_lib.py:253
#, python-format
msgid "No fixed IPs to deallocate for vif %s"
msgstr ""
-#: nova/network/quantumv2/__init__.py:41
+#: nova/network/quantumv2/__init__.py:42
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:105
+#: nova/network/quantumv2/api.py:103
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:108
+#: nova/network/quantumv2/api.py:106
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:161
+#: nova/network/quantumv2/api.py:159
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:173
+#: nova/network/quantumv2/api.py:171
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:182
+#: nova/network/quantumv2/api.py:180
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:192
+#: nova/network/quantumv2/api.py:190
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:207
+#: nova/network/quantumv2/api.py:205
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:459
+#: nova/network/quantumv2/api.py:457
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
-#: nova/openstack/common/lockutils.py:98
+#: nova/openstack/common/lockutils.py:97
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/lockutils.py:184
+#: nova/openstack/common/lockutils.py:183
#, python-format
msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:188
+#: nova/openstack/common/lockutils.py:187
#, python-format
msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:216
+#: nova/openstack/common/lockutils.py:215
#, python-format
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
@@ -4440,7 +4286,11 @@ msgstr ""
msgid "Failed to load notifier %s. These notifications will not be sent."
msgstr ""
-#: nova/openstack/common/notifier/rabbit_notifier.py:45
+#: nova/openstack/common/notifier/rabbit_notifier.py:27
+msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
+msgstr ""
+
+#: nova/openstack/common/notifier/rpc_notifier.py:45
#, python-format
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
msgstr ""
@@ -4524,73 +4374,73 @@ msgid "Returning exception %s to caller"
msgstr ""
#: nova/openstack/common/rpc/impl_kombu.py:168
-#: nova/openstack/common/rpc/impl_qpid.py:144
+#: nova/openstack/common/rpc/impl_qpid.py:126
msgid "Failed to process message... skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:468
+#: nova/openstack/common/rpc/impl_kombu.py:469
#, python-format
msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:490
+#: nova/openstack/common/rpc/impl_kombu.py:491
#, python-format
msgid "Connected to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:527
+#: nova/openstack/common/rpc/impl_kombu.py:528
#, python-format
msgid ""
"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
"%(max_retries)d tries: %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:543
+#: nova/openstack/common/rpc/impl_kombu.py:544
#, python-format
msgid ""
"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
"again in %(sleep_time)d seconds."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:595
-#: nova/openstack/common/rpc/impl_qpid.py:403
+#: nova/openstack/common/rpc/impl_kombu.py:596
+#: nova/openstack/common/rpc/impl_qpid.py:381
#, python-format
msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:613
-#: nova/openstack/common/rpc/impl_qpid.py:418
+#: nova/openstack/common/rpc/impl_kombu.py:614
+#: nova/openstack/common/rpc/impl_qpid.py:396
#, python-format
msgid "Timed out waiting for RPC response: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:617
-#: nova/openstack/common/rpc/impl_qpid.py:422
+#: nova/openstack/common/rpc/impl_kombu.py:618
+#: nova/openstack/common/rpc/impl_qpid.py:400
#, python-format
msgid "Failed to consume message from queue: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:651
-#: nova/openstack/common/rpc/impl_qpid.py:452
+#: nova/openstack/common/rpc/impl_kombu.py:652
+#: nova/openstack/common/rpc/impl_qpid.py:430
#, python-format
msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:359
+#: nova/openstack/common/rpc/impl_qpid.py:330
#, python-format
-msgid "Unable to connect to AMQP server: %s"
+msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:364
+#: nova/openstack/common/rpc/impl_qpid.py:338
#, python-format
msgid "Connected to AMQP server on %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:372
+#: nova/openstack/common/rpc/impl_qpid.py:350
msgid "Re-established AMQP queues"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:430
+#: nova/openstack/common/rpc/impl_qpid.py:408
msgid "Error processing message. Skipping it."
msgstr ""
@@ -4764,127 +4614,127 @@ msgstr ""
msgid "No key defining hosts for topic '%s', see ringfile"
msgstr ""
-#: nova/scheduler/chance.py:50
+#: nova/scheduler/chance.py:49
msgid "Is the appropriate service running?"
msgstr ""
-#: nova/scheduler/chance.py:55
+#: nova/scheduler/chance.py:54
msgid "Could not find another compute"
msgstr ""
-#: nova/scheduler/driver.py:66
+#: nova/scheduler/driver.py:65
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:70 nova/scheduler/manager.py:185
+#: nova/scheduler/driver.py:69 nova/scheduler/manager.py:184
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:112
+#: nova/scheduler/driver.py:120
#, python-format
msgid "Casted '%(method)s' to compute '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:127
+#: nova/scheduler/driver.py:135
#, python-format
msgid "Casted '%(method)s' to %(topic)s '%(host)s'"
msgstr ""
-#: nova/scheduler/driver.py:175
+#: nova/scheduler/driver.py:183
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:183
+#: nova/scheduler/driver.py:191
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:315
+#: nova/scheduler/driver.py:323
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:57
+#: nova/scheduler/filter_scheduler.py:52
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:192
+#: nova/scheduler/filter_scheduler.py:190
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:219
+#: nova/scheduler/filter_scheduler.py:217
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:230
-msgid "Scheduler only understands Compute nodes (for now)"
-msgstr ""
-
-#: nova/scheduler/filter_scheduler.py:282
+#: nova/scheduler/filter_scheduler.py:271
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:292
+#: nova/scheduler/filter_scheduler.py:276
#, python-format
-msgid "Weighted %(weighted_host)s"
+msgid "Choosing host %(best_host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:247
+#: nova/scheduler/host_manager.py:305
#, python-format
-msgid "Host filter fails for ignored host %(host)s"
+msgid "Host filter ignoring hosts: %(ignored_hosts)s"
msgstr ""
-#: nova/scheduler/host_manager.py:254
+#: nova/scheduler/host_manager.py:314
#, python-format
-msgid "Host filter fails for non-forced host %(host)s"
+msgid ""
+"No hosts matched due to not matching 'force_hosts'value of "
+"'%(forced_hosts)s'"
msgstr ""
-#: nova/scheduler/host_manager.py:260
+#: nova/scheduler/host_manager.py:319
#, python-format
-msgid "Host filter function %(func)s failed for %(host)s"
+msgid "Host filter forcing available hosts to %(forced_hosts)s"
msgstr ""
-#: nova/scheduler/host_manager.py:266
+#: nova/scheduler/host_manager.py:335
#, python-format
-msgid "Host filter passes for %(host)s"
+msgid "Ignoring %(service_name)s service update from %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:329
+#: nova/scheduler/host_manager.py:340
#, python-format
-msgid "Received %(service_name)s service update from %(host)s."
-msgstr ""
-
-#: nova/scheduler/host_manager.py:352
-msgid "host_manager only implemented for 'compute'"
+msgid "Received %(service_name)s service update from %(state_key)s."
msgstr ""
-#: nova/scheduler/host_manager.py:360
+#: nova/scheduler/host_manager.py:358
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:169
+#: nova/scheduler/manager.py:168
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
-#: nova/scheduler/scheduler_options.py:70
+#: nova/scheduler/scheduler_options.py:69
#, python-format
msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
msgstr ""
-#: nova/scheduler/scheduler_options.py:79
+#: nova/scheduler/scheduler_options.py:78
#, python-format
msgid "Could not decode scheduler options: '%(e)s'"
msgstr ""
+#: nova/scheduler/filters/__init__.py:55
+msgid ""
+"Use 'nova.scheduler.filters.all_filters' instead of "
+"'nova.scheduler.filters.standard_filters'"
+msgstr ""
+
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:49
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:56
#: nova/scheduler/filters/compute_capabilities_filter.py:48
@@ -4892,17 +4742,17 @@ msgstr ""
msgid "%(host_state)s fails instance_type extra_specs requirements"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:39
+#: nova/scheduler/filters/compute_filter.py:35
#, python-format
msgid "%(host_state)s is disabled or has not been heard from in a while"
msgstr ""
-#: nova/scheduler/filters/compute_filter.py:43
+#: nova/scheduler/filters/compute_filter.py:39
#, python-format
msgid "%(host_state)s is disabled via capabilities"
msgstr ""
-#: nova/scheduler/filters/core_filter.py:46
+#: nova/scheduler/filters/core_filter.py:45
msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
@@ -4970,6 +4820,10 @@ msgstr ""
msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)"
msgstr ""
+#: nova/scheduler/weights/__init__.py:58
+msgid "least_cost has been deprecated in favor of the RAM Weigher."
+msgstr ""
+
#: nova/tests/fake_ldap.py:34
msgid "Attempted to instantiate singleton"
msgstr ""
@@ -4994,15 +4848,15 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:180 nova/volume/cinder.py:159
+#: nova/tests/fake_volume.py:182 nova/volume/cinder.py:160
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:184 nova/volume/cinder.py:162
+#: nova/tests/fake_volume.py:186 nova/volume/cinder.py:163
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:168
+#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:169
msgid "already detached"
msgstr ""
@@ -5022,6 +4876,16 @@ msgstr ""
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
+#: nova/tests/test_libvirt.py:211
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: nova/tests/test_libvirt.py:215
+#, python-format
+msgid "ISCSI Discovery: Found %s"
+msgstr ""
+
#: nova/tests/test_misc.py:62
#, python-format
msgid ""
@@ -5065,12 +4929,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_xenapi.py:724
+#: nova/tests/test_xenapi.py:727
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:735
+#: nova/tests/test_xenapi.py:738
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5087,118 +4951,140 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:2996
+#: nova/tests/api/openstack/compute/test_servers.py:2999
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3001
+#: nova/tests/api/openstack/compute/test_servers.py:3004
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3006
+#: nova/tests/api/openstack/compute/test_servers.py:3009
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:56
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:54
#, python-format
msgid "_create: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:65
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:63
#, python-format
msgid "_delete: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:74
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:72
#, python-format
msgid "_get: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:84
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:82
#, python-format
msgid "_get_all: %s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:128
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:126
#, python-format
msgid "test_snapshot_create: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:137
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:135
#, python-format
msgid "test_snapshot_create: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:159
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:185
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:157
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:183
#, python-format
msgid "test_snapshot_create_force: param=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:168
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:166
#, python-format
msgid "test_snapshot_create_force: resp_dict=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:221
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:219
#, python-format
msgid "test_snapshot_show: resp=%s"
msgstr ""
-#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:247
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:245
#, python-format
msgid "test_snapshot_detail: resp_dict=%s"
msgstr ""
-#: nova/tests/compute/test_compute.py:619
-#: nova/tests/compute/test_compute.py:637
-#: nova/tests/compute/test_compute.py:673
-#: nova/tests/compute/test_compute.py:698
-#: nova/tests/compute/test_compute.py:2373
+#: nova/tests/compute/test_compute.py:626
+#: nova/tests/compute/test_compute.py:644
+#: nova/tests/compute/test_compute.py:680
+#: nova/tests/compute/test_compute.py:705
+#: nova/tests/compute/test_compute.py:2384
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:625
-#: nova/tests/compute/test_compute.py:660
-#: nova/tests/compute/test_compute.py:686
-#: nova/tests/compute/test_compute.py:716
+#: nova/tests/compute/test_compute.py:632
+#: nova/tests/compute/test_compute.py:667
+#: nova/tests/compute/test_compute.py:693
+#: nova/tests/compute/test_compute.py:723
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1093
+#: nova/tests/compute/test_compute.py:1100
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2384
+#: nova/tests/compute/test_compute.py:2395
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:471
+#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:555
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:192 nova/virt/hyperv/vmops.py:406
+#: nova/tests/hyperv/hypervutils.py:199 nova/virt/hyperv/vmops.py:490
#, python-format
msgid "Failed to destroy vm %s"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:235 nova/virt/hyperv/snapshotops.py:92
+#: nova/tests/hyperv/hypervutils.py:242 nova/virt/hyperv/snapshotops.py:93
#, python-format
msgid "Failed to get info for disk %s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:140
+#: nova/tests/hyperv/mockproxy.py:74
+#, python-format
+msgid "Couldn't find invocation num. %(c)d of attribute \"%(name)s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:93 nova/tests/hyperv/mockproxy.py:121
+#, python-format
+msgid "Couldn't find attribute \"%s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:98
+#, python-format
+msgid "Couldn't find attribute \"%(name)s\" with arguments \"%(params)s\""
+msgstr ""
+
+#: nova/tests/hyperv/mockproxy.py:101
+#, python-format
+msgid ""
+"Couldn't find invocation num. %(c)d of attribute \"%(name)s\" with "
+"arguments \"%(params)s\""
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:141
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:144
+#: nova/tests/integrated/test_api_samples.py:145
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5206,25 +5092,25 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:152
+#: nova/tests/integrated/test_api_samples.py:153
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:155
+#: nova/tests/integrated/test_api_samples.py:156
#, python-format
msgid ""
"Length mismatch: %(result)s\n"
"%(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:166
+#: nova/tests/integrated/test_api_samples.py:167
#, python-format
msgid "Result: %(res_obj)s not in %(expected)s."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:183
-#: nova/tests/integrated/test_api_samples.py:196
+#: nova/tests/integrated/test_api_samples.py:185
+#: nova/tests/integrated/test_api_samples.py:198
#, python-format
msgid ""
"Values do not match:\n"
@@ -5286,259 +5172,259 @@ msgstr ""
msgid "Decoding JSON: %s"
msgstr ""
-#: nova/virt/configdrive.py:77
+#: nova/virt/configdrive.py:82
#, python-format
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/firewall.py:176 nova/virt/libvirt/firewall.py:249
+#: nova/virt/firewall.py:180 nova/virt/libvirt/firewall.py:250
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/firewall.py:187
+#: nova/virt/firewall.py:191
msgid "Filters added to instance"
msgstr ""
-#: nova/virt/firewall.py:189
+#: nova/virt/firewall.py:193
msgid "Provider Firewall Rules refreshed"
msgstr ""
-#: nova/virt/firewall.py:357
+#: nova/virt/firewall.py:361
#, python-format
msgid "Adding security group rule: %r"
msgstr ""
-#: nova/virt/firewall.py:489 nova/virt/xenapi/firewall.py:80
+#: nova/virt/firewall.py:492 nova/virt/xenapi/firewall.py:76
#, python-format
msgid "Adding provider rule: %s"
msgstr ""
-#: nova/virt/images.py:114
+#: nova/virt/images.py:115
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/virt/images.py:213
+#: nova/virt/images.py:214
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:219
+#: nova/virt/images.py:220
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:230
+#: nova/virt/images.py:231
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:93
+#: nova/virt/baremetal/dom.py:91
msgid "No domains exist."
msgstr ""
-#: nova/virt/baremetal/dom.py:95
+#: nova/virt/baremetal/dom.py:93
#, python-format
msgid "============= initial domains =========== : %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:99
+#: nova/virt/baremetal/dom.py:97
msgid "Building domain: to be removed"
msgstr ""
-#: nova/virt/baremetal/dom.py:103
+#: nova/virt/baremetal/dom.py:101
msgid "Not running domain: remove"
msgstr ""
-#: nova/virt/baremetal/dom.py:111
+#: nova/virt/baremetal/dom.py:109
msgid "domain running on an unknown node: discarded"
msgstr ""
-#: nova/virt/baremetal/dom.py:127
+#: nova/virt/baremetal/dom.py:125
#, python-format
msgid "No such domain (%s)"
msgstr ""
-#: nova/virt/baremetal/dom.py:134
+#: nova/virt/baremetal/dom.py:132
#, python-format
msgid "Failed power down Bare-metal node %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:143
+#: nova/virt/baremetal/dom.py:141
msgid "deactivate -> activate fails"
msgstr ""
-#: nova/virt/baremetal/dom.py:153
+#: nova/virt/baremetal/dom.py:151
msgid "destroy_domain: no such domain"
msgstr ""
-#: nova/virt/baremetal/dom.py:154
+#: nova/virt/baremetal/dom.py:152
#, python-format
msgid "No such domain %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:161
+#: nova/virt/baremetal/dom.py:159
#, python-format
msgid "Domains: %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:164
+#: nova/virt/baremetal/dom.py:162
#, python-format
msgid "After storing domains: %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:167
+#: nova/virt/baremetal/dom.py:165
msgid "deactivation/removing domain failed"
msgstr ""
-#: nova/virt/baremetal/dom.py:174
+#: nova/virt/baremetal/dom.py:172
msgid "===== Domain is being created ====="
msgstr ""
-#: nova/virt/baremetal/dom.py:177
+#: nova/virt/baremetal/dom.py:175
msgid "Same domain name already exists"
msgstr ""
-#: nova/virt/baremetal/dom.py:179
+#: nova/virt/baremetal/dom.py:177
msgid "create_domain: before get_idle_node"
msgstr ""
-#: nova/virt/baremetal/dom.py:196
+#: nova/virt/baremetal/dom.py:194
#, python-format
msgid "Created new domain: %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:211
+#: nova/virt/baremetal/dom.py:209
#, python-format
msgid "Failed to boot Bare-metal node %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:220
+#: nova/virt/baremetal/dom.py:218
msgid "No such domain exists"
msgstr ""
-#: nova/virt/baremetal/dom.py:224
+#: nova/virt/baremetal/dom.py:222
#, python-format
msgid "change_domain_state: to new state %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:231
+#: nova/virt/baremetal/dom.py:229
#, python-format
msgid "Stored fake domains to the file: %s"
msgstr ""
-#: nova/virt/baremetal/dom.py:242
+#: nova/virt/baremetal/dom.py:240
msgid "domain does not exist"
msgstr ""
-#: nova/virt/baremetal/driver.py:116
+#: nova/virt/baremetal/driver.py:117
#, python-format
msgid "Error encountered when destroying instance '%(name)s': %(ex)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:130
+#: nova/virt/baremetal/driver.py:131
#, python-format
msgid "instance %(instance_name)s: deleting instance files %(target)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:157
+#: nova/virt/baremetal/driver.py:158
#, python-format
msgid "instance %s: rebooted"
msgstr ""
-#: nova/virt/baremetal/driver.py:161
+#: nova/virt/baremetal/driver.py:162
msgid "_wait_for_reboot failed"
msgstr ""
-#: nova/virt/baremetal/driver.py:190
+#: nova/virt/baremetal/driver.py:191
#, python-format
msgid "instance %s: rescued"
msgstr ""
-#: nova/virt/baremetal/driver.py:194
+#: nova/virt/baremetal/driver.py:195
msgid "_wait_for_rescue failed"
msgstr ""
-#: nova/virt/baremetal/driver.py:211
+#: nova/virt/baremetal/driver.py:212
msgid "<============= spawn of baremetal =============>"
msgstr ""
-#: nova/virt/baremetal/driver.py:224
+#: nova/virt/baremetal/driver.py:225
#, python-format
msgid "instance %s: is building"
msgstr ""
-#: nova/virt/baremetal/driver.py:230
+#: nova/virt/baremetal/driver.py:231
msgid "Key is injected but instance is not running yet"
msgstr ""
-#: nova/virt/baremetal/driver.py:239
+#: nova/virt/baremetal/driver.py:240
#, python-format
msgid "instance %s: booted"
msgstr ""
-#: nova/virt/baremetal/driver.py:246
+#: nova/virt/baremetal/driver.py:247
#, python-format
msgid "~~~~~~ current state = %s ~~~~~~"
msgstr ""
-#: nova/virt/baremetal/driver.py:248
+#: nova/virt/baremetal/driver.py:249
#, python-format
msgid "instance %s spawned successfully"
msgstr ""
-#: nova/virt/baremetal/driver.py:251
+#: nova/virt/baremetal/driver.py:252
#, python-format
msgid "instance %s:not booted"
msgstr ""
-#: nova/virt/baremetal/driver.py:254
+#: nova/virt/baremetal/driver.py:255
msgid "Baremetal assignment is overcommitted."
msgstr ""
-#: nova/virt/baremetal/driver.py:338
+#: nova/virt/baremetal/driver.py:339
#, python-format
msgid "instance %s: Creating image"
msgstr ""
-#: nova/virt/baremetal/driver.py:456
+#: nova/virt/baremetal/driver.py:457
#, python-format
msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/baremetal/driver.py:466
+#: nova/virt/baremetal/driver.py:467
#, python-format
msgid ""
"instance %(inst_name)s: ignoring error injecting data into image "
"%(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/baremetal/driver.py:512
+#: nova/virt/baremetal/driver.py:513
#, python-format
msgid "instance %s: starting toXML method"
msgstr ""
-#: nova/virt/baremetal/driver.py:515
+#: nova/virt/baremetal/driver.py:516
#, python-format
msgid "instance %s: finished toXML method"
msgstr ""
-#: nova/virt/baremetal/driver.py:559 nova/virt/hyperv/hostops.py:43
-#: nova/virt/libvirt/driver.py:1988
+#: nova/virt/baremetal/driver.py:560 nova/virt/hyperv/hostops.py:46
+#: nova/virt/libvirt/driver.py:1989
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. This error can be safely ignored for now."
msgstr ""
-#: nova/virt/baremetal/driver.py:682
+#: nova/virt/baremetal/driver.py:684
#, python-format
msgid "#### RLK: cpu_arch = %s "
msgstr ""
-#: nova/virt/baremetal/driver.py:699
+#: nova/virt/baremetal/driver.py:701
msgid "Updating!"
msgstr ""
-#: nova/virt/baremetal/driver.py:726 nova/virt/hyperv/hostops.py:141
-#: nova/virt/libvirt/driver.py:3030 nova/virt/xenapi/host.py:149
+#: nova/virt/baremetal/driver.py:728 nova/virt/hyperv/hostops.py:132
+#: nova/virt/libvirt/driver.py:3037 nova/virt/xenapi/host.py:156
msgid "Updating host stats"
msgstr ""
@@ -5547,146 +5433,146 @@ msgstr ""
msgid "Unknown baremetal driver %(d)s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:184
+#: nova/virt/baremetal/tilera.py:185
msgid "free_node...."
msgstr ""
-#: nova/virt/baremetal/tilera.py:215
+#: nova/virt/baremetal/tilera.py:216
#, python-format
msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:220
+#: nova/virt/baremetal/tilera.py:221
msgid "status of node is set to 0"
msgstr ""
-#: nova/virt/baremetal/tilera.py:231
+#: nova/virt/baremetal/tilera.py:232
msgid "rootfs is already removed"
msgstr ""
-#: nova/virt/baremetal/tilera.py:263
+#: nova/virt/baremetal/tilera.py:264
msgid "Before ping to the bare-metal node"
msgstr ""
-#: nova/virt/baremetal/tilera.py:274
+#: nova/virt/baremetal/tilera.py:275
#, python-format
msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"
msgstr ""
-#: nova/virt/baremetal/tilera.py:278
+#: nova/virt/baremetal/tilera.py:279
#, python-format
msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:290
+#: nova/virt/baremetal/tilera.py:291
msgid "Noting to do for tilera nodes: vmlinux is in CF"
msgstr ""
-#: nova/virt/baremetal/tilera.py:313
+#: nova/virt/baremetal/tilera.py:314
msgid "activate_node"
msgstr ""
-#: nova/virt/baremetal/tilera.py:327
+#: nova/virt/baremetal/tilera.py:328
msgid "Node is unknown error state."
msgstr ""
-#: nova/virt/disk/api.py:196
+#: nova/virt/disk/api.py:190
msgid "no capable image handler configured"
msgstr ""
-#: nova/virt/disk/api.py:243
+#: nova/virt/disk/api.py:237
#, python-format
msgid "no disk image handler for: %s"
msgstr ""
-#: nova/virt/disk/api.py:255
+#: nova/virt/disk/api.py:249
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:321
+#: nova/virt/disk/api.py:315
#, python-format
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': "
"%(errors)s"
msgstr ""
-#: nova/virt/disk/api.py:338
+#: nova/virt/disk/api.py:332
#, python-format
msgid "Failed to unmount container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:371
+#: nova/virt/disk/api.py:365
msgid "injected file path not valid"
msgstr ""
-#: nova/virt/disk/api.py:516
+#: nova/virt/disk/api.py:510
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:550
+#: nova/virt/disk/api.py:544
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:566
+#: nova/virt/disk/api.py:560
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
-#: nova/virt/disk/guestfs.py:41
+#: nova/virt/disk/mount/api.py:83
#, python-format
-msgid "unsupported partition: %s"
+msgid "partition search unsupported with %s"
msgstr ""
-#: nova/virt/disk/guestfs.py:66 nova/virt/disk/guestfs.py:80
-#: nova/virt/disk/mount.py:132
+#: nova/virt/disk/mount/api.py:99
#, python-format
-msgid "Failed to mount filesystem: %s"
+msgid "partition %s not found"
msgstr ""
-#: nova/virt/disk/guestfs.py:79
-msgid "unknown guestmount error"
+#: nova/virt/disk/mount/api.py:100
+#, python-format
+msgid "Failed to map partitions: %s"
msgstr ""
-#: nova/virt/disk/guestfs.py:119
+#: nova/virt/disk/mount/api.py:132 nova/virt/disk/mount/guestfs.py:66
+#: nova/virt/disk/mount/guestfs.py:80
#, python-format
-msgid "Failed to umount image at %s, guestmount was still running after 10s"
+msgid "Failed to mount filesystem: %s"
msgstr ""
-#: nova/virt/disk/loop.py:31
+#: nova/virt/disk/mount/guestfs.py:41
#, python-format
-msgid "Could not attach image to loopback: %s"
+msgid "unsupported partition: %s"
msgstr ""
-#: nova/virt/disk/mount.py:83
-#, python-format
-msgid "partition search unsupported with %s"
+#: nova/virt/disk/mount/guestfs.py:79
+msgid "unknown guestmount error"
msgstr ""
-#: nova/virt/disk/mount.py:99
+#: nova/virt/disk/mount/guestfs.py:119
#, python-format
-msgid "partition %s not found"
+msgid "Failed to umount image at %s, guestmount was still running after 10s"
msgstr ""
-#: nova/virt/disk/mount.py:100
+#: nova/virt/disk/mount/loop.py:31
#, python-format
-msgid "Failed to map partitions: %s"
+msgid "Could not attach image to loopback: %s"
msgstr ""
-#: nova/virt/disk/nbd.py:59
+#: nova/virt/disk/mount/nbd.py:60
msgid "nbd unavailable: module not loaded"
msgstr ""
-#: nova/virt/disk/nbd.py:64
+#: nova/virt/disk/mount/nbd.py:65
msgid "No free nbd devices"
msgstr ""
-#: nova/virt/disk/nbd.py:86
+#: nova/virt/disk/mount/nbd.py:87
#, python-format
msgid "qemu-nbd error: %s"
msgstr ""
-#: nova/virt/disk/nbd.py:98
+#: nova/virt/disk/mount/nbd.py:99
#, python-format
msgid "nbd device %s did not show up"
msgstr ""
@@ -5719,335 +5605,349 @@ msgstr ""
msgid "get_console_output called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:112
+#: nova/virt/hyperv/hostops.py:102
#, python-format
msgid "Windows version: %s "
msgstr ""
-#: nova/virt/hyperv/hostops.py:124
+#: nova/virt/hyperv/hostops.py:114
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:161
+#: nova/virt/hyperv/hostops.py:152
msgid "get_host_stats called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:52
+#: nova/virt/hyperv/livemigrationops.py:53
msgid ""
"Live migration is not supported \" \"by this version "
"of Hyper-V"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:61
+#: nova/virt/hyperv/livemigrationops.py:62
msgid "Live migration is not enabled on this host"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:64
+#: nova/virt/hyperv/livemigrationops.py:65
msgid "Live migration networks are not configured on this host"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:68
+#: nova/virt/hyperv/livemigrationops.py:69
msgid "live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:94
+#: nova/virt/hyperv/livemigrationops.py:95
#, python-format
msgid "Getting live migration networks for remote host: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:113
+#: nova/virt/hyperv/livemigrationops.py:114
#, python-format
msgid "Starting live migration for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:126
+#: nova/virt/hyperv/livemigrationops.py:127
#, python-format
msgid "Failed to live migrate VM %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:129
+#: nova/virt/hyperv/livemigrationops.py:130
#, python-format
msgid "Calling live migration recover_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:133
+#: nova/virt/hyperv/livemigrationops.py:134
#, python-format
msgid "Calling live migration post_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:139
+#: nova/virt/hyperv/livemigrationops.py:140
msgid "pre_live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:157
+#: nova/virt/hyperv/livemigrationops.py:158
msgid "post_live_migration_at_destination called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:161
+#: nova/virt/hyperv/livemigrationops.py:162
#, python-format
msgid "compare_cpu called %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:57
+#: nova/virt/hyperv/snapshotops.py:58
#, python-format
msgid "Creating snapshot for instance %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:71
+#: nova/virt/hyperv/snapshotops.py:72
#, python-format
msgid "Failed to create snapshot for VM %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:83
+#: nova/virt/hyperv/snapshotops.py:84
#, python-format
msgid "Getting info for VHD %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:106
+#: nova/virt/hyperv/snapshotops.py:107
#, python-format
msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:116
+#: nova/virt/hyperv/snapshotops.py:117
#, python-format
msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:120
+#: nova/virt/hyperv/snapshotops.py:121
#, python-format
msgid ""
"Reconnecting copied base VHD %(dest_base_disk_path)s and diff VHD "
"%(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:134
+#: nova/virt/hyperv/snapshotops.py:135
#, python-format
msgid ""
"Failed to reconnect base disk %(dest_base_disk_path)s and diff disk "
"%(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:139
+#: nova/virt/hyperv/snapshotops.py:140
#, python-format
msgid "Merging base disk %(dest_base_disk_path)s and diff disk %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:151
+#: nova/virt/hyperv/snapshotops.py:152
#, python-format
msgid ""
"Failed to merge base disk %(dest_base_disk_path)s and diff disk "
"%(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:164
+#: nova/virt/hyperv/snapshotops.py:165
#, python-format
msgid ""
"Updating Glance image %(image_id)s with content from merged disk "
"%(image_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:169
+#: nova/virt/hyperv/snapshotops.py:170
#, python-format
msgid "Snapshot image %(image_id)s updated for VM %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:172
+#: nova/virt/hyperv/snapshotops.py:173
#, python-format
msgid "Removing snapshot %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:181
+#: nova/virt/hyperv/snapshotops.py:182
#, python-format
msgid "Failed to remove snapshot for VM %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:186
+#: nova/virt/hyperv/snapshotops.py:187
#, python-format
msgid "Removing folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:67
+#: nova/virt/hyperv/vmops.py:80
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:91
+#: nova/virt/hyperv/vmops.py:104
#, python-format
msgid "hyperv vm state: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:97
+#: nova/virt/hyperv/vmops.py:110
#, python-format
msgid ""
"Got Info for vm %(instance_name)s: state=%(state)s, mem=%(memusage)s, "
"num_cpu=%(numprocs)s, uptime=%(uptime)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:133
+#: nova/virt/hyperv/vmops.py:146
#, python-format
msgid "cache image failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:152
+#: nova/virt/hyperv/vmops.py:170
#, python-format
msgid "Starting VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:154
+#: nova/virt/hyperv/vmops.py:172
#, python-format
msgid "Started VM %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:156
+#: nova/virt/hyperv/vmops.py:174
#, python-format
msgid "spawn vm failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:175
+#: nova/virt/hyperv/vmops.py:180
+#, python-format
+msgid "Invalid config_drive_format \"%s\""
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:183 nova/virt/libvirt/driver.py:1406
+msgid "Using config drive"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:194 nova/virt/libvirt/driver.py:1416
+#, python-format
+msgid "Creating config drive at %(path)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:239
#, python-format
msgid "Failed to create VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:178
+#: nova/virt/hyperv/vmops.py:242
#, python-format
msgid "Created VM %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:195
+#: nova/virt/hyperv/vmops.py:259
#, python-format
msgid "Set memory for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:208
+#: nova/virt/hyperv/vmops.py:272
#, python-format
msgid "Set vcpus for vm %s..."
msgstr ""
-#: nova/virt/hyperv/vmops.py:212
+#: nova/virt/hyperv/vmops.py:276
#, python-format
msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
msgstr ""
-#: nova/virt/hyperv/vmops.py:221
+#: nova/virt/hyperv/vmops.py:285
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmops.py:229
+#: nova/virt/hyperv/vmops.py:293
#, python-format
msgid "Failed to add scsi controller to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:234
+#: nova/virt/hyperv/vmops.py:310
#, python-format
-msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s"
+msgid "Creating disk for %(vm_name)s by attaching disk file %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:261
+#: nova/virt/hyperv/vmops.py:338
#, python-format
-msgid "Failed to add diskdrive to VM %s"
+msgid "Failed to add drive to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:264
+#: nova/virt/hyperv/vmops.py:341
#, python-format
-msgid "New disk drive path is %s"
+msgid "New %(drive_type)s drive path is %(drive_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:283
+#: nova/virt/hyperv/vmops.py:366
#, python-format
-msgid "Failed to add vhd file to VM %s"
+msgid "Failed to add %(drive_type)s image to VM %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:285
+#: nova/virt/hyperv/vmops.py:368
#, python-format
-msgid "Created disk for %s"
+msgid "Created drive type %(drive_type)s for %(vm_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:289
+#: nova/virt/hyperv/vmops.py:373
#, python-format
msgid "Creating nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:294
+#: nova/virt/hyperv/vmops.py:378
msgid "Cannot find vSwitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:314
+#: nova/virt/hyperv/vmops.py:398
msgid "Failed creating a port on the external vswitch"
msgstr ""
-#: nova/virt/hyperv/vmops.py:315
+#: nova/virt/hyperv/vmops.py:399
#, python-format
msgid "Failed creating port for %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:318
+#: nova/virt/hyperv/vmops.py:402
#, python-format
msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:330
+#: nova/virt/hyperv/vmops.py:414
#, python-format
msgid "Failed to add nic to VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:332
+#: nova/virt/hyperv/vmops.py:416
#, python-format
msgid "Created nic for %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:339 nova/virt/hyperv/vmops.py:342
+#: nova/virt/hyperv/vmops.py:423 nova/virt/hyperv/vmops.py:426
#, python-format
msgid "Attempting to bind NIC to %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:347
+#: nova/virt/hyperv/vmops.py:431
msgid "No vSwitch specified, attaching to default"
msgstr ""
-#: nova/virt/hyperv/vmops.py:372
+#: nova/virt/hyperv/vmops.py:456
#, python-format
msgid "Got request to destroy vm %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:416
+#: nova/virt/hyperv/vmops.py:500
#, python-format
msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:422
+#: nova/virt/hyperv/vmops.py:506
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:427
+#: nova/virt/hyperv/vmops.py:511
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:433
+#: nova/virt/hyperv/vmops.py:517
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:438
+#: nova/virt/hyperv/vmops.py:522
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:443
+#: nova/virt/hyperv/vmops.py:527
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:448
+#: nova/virt/hyperv/vmops.py:532
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:468
+#: nova/virt/hyperv/vmops.py:552
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:497
+#: nova/virt/hyperv/vmops.py:581
#, python-format
msgid "use_cow_image:%s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:517
+#: nova/virt/hyperv/vmops.py:601
#, python-format
msgid "Failed to create Difference Disk from %(base)s to %(target)s"
msgstr ""
@@ -6069,407 +5969,398 @@ msgstr ""
msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s "
msgstr ""
-#: nova/virt/hyperv/vmutils.py:80 nova/virt/hyperv/vmutils.py:96
+#: nova/virt/hyperv/vmutils.py:80 nova/virt/hyperv/vmutils.py:100
#, python-format
msgid "Creating folder %s "
msgstr ""
-#: nova/virt/hyperv/vmutils.py:94
+#: nova/virt/hyperv/vmutils.py:98
#, python-format
msgid "Removing existing folder %s "
msgstr ""
-#: nova/virt/hyperv/volumeops.py:69 nova/virt/xenapi/vm_utils.py:483
+#: nova/virt/hyperv/volumeops.py:70 nova/virt/xenapi/vm_utils.py:510
#, python-format
msgid "block device info: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:97
+#: nova/virt/hyperv/volumeops.py:98
#, python-format
msgid "Attach boot from volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:100
+#: nova/virt/hyperv/volumeops.py:101
#, python-format
msgid "Unable to attach boot volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:109 nova/virt/xenapi/volumeops.py:114
+#: nova/virt/hyperv/volumeops.py:110 nova/virt/xenapi/volumeops.py:114
#, python-format
msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:134
+#: nova/virt/hyperv/volumeops.py:135
#, python-format
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:137 nova/virt/xenapi/volumeops.py:190
+#: nova/virt/hyperv/volumeops.py:138 nova/virt/xenapi/volumeops.py:190
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:156
+#: nova/virt/hyperv/volumeops.py:157
#, python-format
msgid "Failed to add volume to VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:170
+#: nova/virt/hyperv/volumeops.py:171
#, python-format
msgid "Detach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:187
+#: nova/virt/hyperv/volumeops.py:188
#, python-format
msgid "Mounted disk to detach is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:188
+#: nova/virt/hyperv/volumeops.py:189
#, python-format
msgid "host_resource disk detached is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:191
+#: nova/virt/hyperv/volumeops.py:192
#, python-format
msgid "Physical disk detached is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:198
+#: nova/virt/hyperv/volumeops.py:199
#, python-format
msgid "Failed to remove volume from VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:605
+#: nova/virt/hyperv/volumeops.py:208 nova/virt/libvirt/driver.py:603
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:224
+#: nova/virt/hyperv/volumeops.py:225
#, python-format
msgid "device.InitiatorName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:225
+#: nova/virt/hyperv/volumeops.py:226
#, python-format
msgid "device.TargetName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:226
+#: nova/virt/hyperv/volumeops.py:227
#, python-format
msgid "device.ScsiPortNumber: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:227
+#: nova/virt/hyperv/volumeops.py:228
#, python-format
msgid "device.ScsiPathId: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:228
+#: nova/virt/hyperv/volumeops.py:229
#, python-format
msgid "device.ScsiTargetId): %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:229
+#: nova/virt/hyperv/volumeops.py:230
#, python-format
msgid "device.ScsiLun: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:230
+#: nova/virt/hyperv/volumeops.py:231
#, python-format
msgid "device.DeviceInterfaceGuid :%s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:232
+#: nova/virt/hyperv/volumeops.py:233
#, python-format
msgid "device.DeviceInterfaceName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:234
+#: nova/virt/hyperv/volumeops.py:235
#, python-format
msgid "device.LegacyName: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:235
+#: nova/virt/hyperv/volumeops.py:236
#, python-format
msgid "device.DeviceType: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:236
+#: nova/virt/hyperv/volumeops.py:237
#, python-format
msgid "device.DeviceNumber %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:237
+#: nova/virt/hyperv/volumeops.py:238
#, python-format
msgid "device.PartitionNumber :%s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:243 nova/virt/hyperv/volumeops.py:262
+#: nova/virt/hyperv/volumeops.py:244 nova/virt/hyperv/volumeops.py:263
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:245
+#: nova/virt/hyperv/volumeops.py:246
#, python-format
msgid "Device number : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:246
+#: nova/virt/hyperv/volumeops.py:247
#, python-format
msgid "Target lun : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:252 nova/virt/hyperv/volumeops.py:259
+#: nova/virt/hyperv/volumeops.py:253 nova/virt/hyperv/volumeops.py:260
#, python-format
msgid "Mounted disk is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:276
+#: nova/virt/hyperv/volumeops.py:277
#, python-format
msgid "Drive number to disconnect is: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:283
+#: nova/virt/hyperv/volumeops.py:284
#, python-format
msgid "DeviceNumber : %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:288
+#: nova/virt/hyperv/volumeops.py:289
#, python-format
msgid "Disk path to parse: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:290
+#: nova/virt/hyperv/volumeops.py:291
#, python-format
msgid "start_device_id: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:292
+#: nova/virt/hyperv/volumeops.py:293
#, python-format
msgid "end_device_id: %s"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:51
+#: nova/virt/hyperv/volumeutils.py:52
#, python-format
msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:68
+#: nova/virt/hyperv/volumeutils.py:69
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1464
-#: nova/virt/xenapi/vm_utils.py:476
+#: nova/virt/hyperv/volumeutils.py:122 nova/virt/libvirt/driver.py:1467
+#: nova/virt/xenapi/vm_utils.py:503
#, python-format
msgid "block_device_list %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:334
+#: nova/virt/libvirt/driver.py:332
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:340
+#: nova/virt/libvirt/driver.py:338
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:361
+#: nova/virt/libvirt/driver.py:359
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:383 nova/virt/libvirt/driver.py:386
+#: nova/virt/libvirt/driver.py:381 nova/virt/libvirt/driver.py:384
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:468
+#: nova/virt/libvirt/driver.py:466
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:482
+#: nova/virt/libvirt/driver.py:480
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:487
+#: nova/virt/libvirt/driver.py:485
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:509
+#: nova/virt/libvirt/driver.py:507
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:524
+#: nova/virt/libvirt/driver.py:522
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:537
+#: nova/virt/libvirt/driver.py:535
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:554
+#: nova/virt/libvirt/driver.py:552
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:568
+#: nova/virt/libvirt/driver.py:566
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:730
+#: nova/virt/libvirt/driver.py:726
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:740
+#: nova/virt/libvirt/driver.py:736
msgid "attaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:753
+#: nova/virt/libvirt/driver.py:756
msgid "detaching LXC block device"
msgstr ""
-#: nova/virt/libvirt/driver.py:885
+#: nova/virt/libvirt/driver.py:888
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:889
+#: nova/virt/libvirt/driver.py:892
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:921
+#: nova/virt/libvirt/driver.py:924
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:956
+#: nova/virt/libvirt/driver.py:960
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1086
+#: nova/virt/libvirt/driver.py:1090
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1093 nova/virt/powervm/operator.py:253
+#: nova/virt/libvirt/driver.py:1097 nova/virt/powervm/operator.py:257
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1109
+#: nova/virt/libvirt/driver.py:1113
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1155
+#: nova/virt/libvirt/driver.py:1159
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1199
+#: nova/virt/libvirt/driver.py:1203
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1203
+#: nova/virt/libvirt/driver.py:1207
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1207 nova/virt/libvirt/driver.py:1211
+#: nova/virt/libvirt/driver.py:1211 nova/virt/libvirt/driver.py:1215
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1277
+#: nova/virt/libvirt/driver.py:1281
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1403
-msgid "Using config drive"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1413
-#, python-format
-msgid "Creating config drive at %(path)s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1427
+#: nova/virt/libvirt/driver.py:1430
#, python-format
msgid "Injecting %(injection)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1437
+#: nova/virt/libvirt/driver.py:1440
#, python-format
msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1511
+#: nova/virt/libvirt/driver.py:1514
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1517
+#: nova/virt/libvirt/driver.py:1520
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1524
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1525
+#: nova/virt/libvirt/driver.py:1528
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1541
+#: nova/virt/libvirt/driver.py:1544
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1833
+#: nova/virt/libvirt/driver.py:1834
msgid "Starting toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1837
+#: nova/virt/libvirt/driver.py:1838
msgid "Finished toXML method"
msgstr ""
-#: nova/virt/libvirt/driver.py:1854
+#: nova/virt/libvirt/driver.py:1855
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2106
+#: nova/virt/libvirt/driver.py:2107
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2293
+#: nova/virt/libvirt/driver.py:2295
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2301
+#: nova/virt/libvirt/driver.py:2303
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2336
+#: nova/virt/libvirt/driver.py:2338
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2356
+#: nova/virt/libvirt/driver.py:2363
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2368
+#: nova/virt/libvirt/driver.py:2375
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -6479,51 +6370,51 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2385
+#: nova/virt/libvirt/driver.py:2392
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2433
+#: nova/virt/libvirt/driver.py:2440
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2503
+#: nova/virt/libvirt/driver.py:2510
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2547
+#: nova/virt/libvirt/driver.py:2554
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2674
+#: nova/virt/libvirt/driver.py:2681
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2723
+#: nova/virt/libvirt/driver.py:2730
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2785
+#: nova/virt/libvirt/driver.py:2792
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2844
+#: nova/virt/libvirt/driver.py:2851
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2851
+#: nova/virt/libvirt/driver.py:2858
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2902
+#: nova/virt/libvirt/driver.py:2909
msgid "Starting finish_revert_migration"
msgstr ""
@@ -6533,158 +6424,158 @@ msgid ""
"correctly."
msgstr ""
-#: nova/virt/libvirt/firewall.py:102
+#: nova/virt/libvirt/firewall.py:103
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:110
+#: nova/virt/libvirt/firewall.py:111
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:191
+#: nova/virt/libvirt/firewall.py:192
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:214
+#: nova/virt/libvirt/firewall.py:215
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:230
+#: nova/virt/libvirt/firewall.py:231
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:213
+#: nova/virt/libvirt/imagebackend.py:214
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:276
+#: nova/virt/libvirt/imagebackend.py:277
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:166
+#: nova/virt/libvirt/imagecache.py:165
#, python-format
msgid "%s is a valid instance name"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:169
+#: nova/virt/libvirt/imagecache.py:168
#, python-format
msgid "%s has a disk file"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:171
+#: nova/virt/libvirt/imagecache.py:170
#, python-format
msgid "Instance %(instance)s is backed by %(backing)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:184
+#: nova/virt/libvirt/imagecache.py:183
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
"appear in the image service"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:239
+#: nova/virt/libvirt/imagecache.py:238
#, python-format
msgid "%(id)s (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:249
+#: nova/virt/libvirt/imagecache.py:248
#, python-format
msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:268
+#: nova/virt/libvirt/imagecache.py:267
#, python-format
msgid "Cannot remove %(base_file)s, it does not exist"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:280
+#: nova/virt/libvirt/imagecache.py:279
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:283
+#: nova/virt/libvirt/imagecache.py:282
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:290
+#: nova/virt/libvirt/imagecache.py:289
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:301
+#: nova/virt/libvirt/imagecache.py:300
#, python-format
msgid "%(id)s (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:320
+#: nova/virt/libvirt/imagecache.py:319
#, python-format
msgid ""
"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
"on other nodes"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:332
+#: nova/virt/libvirt/imagecache.py:331
#, python-format
msgid ""
"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
"instances: %(instance_list)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:340
+#: nova/virt/libvirt/imagecache.py:339
#, python-format
msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:350
+#: nova/virt/libvirt/imagecache.py:349
#, python-format
msgid "%(id)s (%(base_file)s): image is not in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:356
+#: nova/virt/libvirt/imagecache.py:355
#, python-format
msgid "%(id)s (%(base_file)s): image is in use"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:379
+#: nova/virt/libvirt/imagecache.py:378
#, python-format
msgid "Skipping verification, no base directory at %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:383
+#: nova/virt/libvirt/imagecache.py:382
msgid "Verify base images"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:390
+#: nova/virt/libvirt/imagecache.py:389
#, python-format
msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:408
+#: nova/virt/libvirt/imagecache.py:407
#, python-format
msgid "Unknown base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:413
+#: nova/virt/libvirt/imagecache.py:412
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:416
+#: nova/virt/libvirt/imagecache.py:415
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:420
+#: nova/virt/libvirt/imagecache.py:419
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:428
+#: nova/virt/libvirt/imagecache.py:427
msgid "Verification complete"
msgstr ""
@@ -6693,14 +6584,14 @@ msgstr ""
msgid "LVM snapshots not implemented"
msgstr ""
-#: nova/virt/libvirt/utils.py:134
+#: nova/virt/libvirt/utils.py:133
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/utils.py:143
+#: nova/virt/libvirt/utils.py:142
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -6708,73 +6599,72 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:190
+#: nova/virt/libvirt/utils.py:189
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:409
+#: nova/virt/libvirt/utils.py:408
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/utils.py:498
+#: nova/virt/libvirt/utils.py:497
#, python-format
msgid "Reading image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:502
+#: nova/virt/libvirt/utils.py:501
#, python-format
msgid "Read: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:508
+#: nova/virt/libvirt/utils.py:507
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/utils.py:532
+#: nova/virt/libvirt/utils.py:531
#, python-format
msgid "Writing image info file: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:533
+#: nova/virt/libvirt/utils.py:532
#, python-format
msgid "Wrote: %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:97
+#: nova/virt/libvirt/vif.py:96
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:107
+#: nova/virt/libvirt/vif.py:106
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:183 nova/virt/libvirt/vif.py:249
-#: nova/virt/libvirt/vif.py:309
+#: nova/virt/libvirt/vif.py:182 nova/virt/libvirt/vif.py:248
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:176
+#: nova/virt/libvirt/volume.py:190
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:179
+#: nova/virt/libvirt/volume.py:193
#, python-format
msgid ""
"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
"Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:191
+#: nova/virt/libvirt/volume.py:205
#, python-format
msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume_nfs.py:81
+#: nova/virt/libvirt/volume_nfs.py:82
#, python-format
msgid "%s is already mounted"
msgstr ""
@@ -6837,130 +6727,134 @@ msgstr ""
msgid "PowerVM LPAR instance '%(instance_name)s' cleanup failed"
msgstr ""
-#: nova/virt/powervm/operator.py:91
+#: nova/virt/powervm/operator.py:94
#, python-format
msgid "LPAR instance '%s' not found"
msgstr ""
-#: nova/virt/powervm/operator.py:174
+#: nova/virt/powervm/operator.py:178
msgid "Not enough free memory in the host"
msgstr ""
-#: nova/virt/powervm/operator.py:184
+#: nova/virt/powervm/operator.py:188
msgid "Insufficient available CPU on PowerVM"
msgstr ""
-#: nova/virt/powervm/operator.py:208
+#: nova/virt/powervm/operator.py:212
#, python-format
msgid "Creating LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:211
+#: nova/virt/powervm/operator.py:215
#, python-format
msgid "LPAR instance '%s' creation failed"
msgstr ""
-#: nova/virt/powervm/operator.py:221
+#: nova/virt/powervm/operator.py:225
#, python-format
msgid "Fetching image '%s' from glance"
msgstr ""
-#: nova/virt/powervm/operator.py:225
+#: nova/virt/powervm/operator.py:229
#, python-format
msgid "Copying image '%s' to IVM"
msgstr ""
-#: nova/virt/powervm/operator.py:230
+#: nova/virt/powervm/operator.py:234
msgid "Creating logical volume"
msgstr ""
-#: nova/virt/powervm/operator.py:235
+#: nova/virt/powervm/operator.py:239
#, python-format
msgid "Copying image to the device '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:238
+#: nova/virt/powervm/operator.py:242
#, python-format
msgid "PowerVM image creation failed: %s"
msgstr ""
-#: nova/virt/powervm/operator.py:244
+#: nova/virt/powervm/operator.py:248
#, python-format
msgid "Activating the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:258
+#: nova/virt/powervm/operator.py:262
#, python-format
msgid "Instance '%s' failed to boot"
msgstr ""
-#: nova/virt/powervm/operator.py:275
+#: nova/virt/powervm/operator.py:274
+msgid "Error while attempting to clean up failed instance launch."
+msgstr ""
+
+#: nova/virt/powervm/operator.py:285
#, python-format
msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
msgstr ""
-#: nova/virt/powervm/operator.py:284
+#: nova/virt/powervm/operator.py:294
#, python-format
msgid "Shutting down the instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:288
+#: nova/virt/powervm/operator.py:298
#, python-format
msgid "Removing the logical volume '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:291
+#: nova/virt/powervm/operator.py:301
#, python-format
msgid "Deleting the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:294
+#: nova/virt/powervm/operator.py:304
msgid "PowerVM instance cleanup failed"
msgstr ""
-#: nova/virt/powervm/operator.py:495
+#: nova/virt/powervm/operator.py:505
msgid "Could not create logical volume. No space left on any volume group."
msgstr ""
-#: nova/virt/powervm/operator.py:554
+#: nova/virt/powervm/operator.py:564
msgid "Unable to get checksum"
msgstr ""
-#: nova/virt/powervm/operator.py:557
+#: nova/virt/powervm/operator.py:567
msgid "Image checksums do not match"
msgstr ""
-#: nova/virt/powervm/operator.py:582
+#: nova/virt/powervm/operator.py:592
msgid "Uncompressed image file not found"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:111
+#: nova/virt/vmwareapi/driver.py:112
msgid ""
"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
"vmwareapi_host_password to use compute_driver=vmwareapi.VMWareESXDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:275
+#: nova/virt/vmwareapi/driver.py:276
#, python-format
msgid "In vmwareapi:_create_session, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:358
+#: nova/virt/vmwareapi/driver.py:359
#, python-format
msgid "In vmwareapi:_call_method, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:393
+#: nova/virt/vmwareapi/driver.py:394
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: success"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:398
+#: nova/virt/vmwareapi/driver.py:399
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:402
+#: nova/virt/vmwareapi/driver.py:403
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
@@ -7032,279 +6926,279 @@ msgstr ""
msgid "Created Port Group with name %s on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/read_write_util.py:145
+#: nova/virt/vmwareapi/read_write_util.py:143
#, python-format
msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:84
+#: nova/virt/vmwareapi/vim.py:85
msgid "Unable to import suds."
msgstr ""
-#: nova/virt/vmwareapi/vim.py:90
+#: nova/virt/vmwareapi/vim.py:91
msgid "Must specify vmwareapi_wsdl_loc"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:145
+#: nova/virt/vmwareapi/vim.py:146
#, python-format
msgid "No such SOAP method '%s' provided by VI SDK"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:150
+#: nova/virt/vmwareapi/vim.py:151
#, python-format
msgid "httplib error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:157
+#: nova/virt/vmwareapi/vim.py:158
#, python-format
msgid "Socket error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:162
+#: nova/virt/vmwareapi/vim.py:163
#, python-format
msgid "Type error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:166
+#: nova/virt/vmwareapi/vim.py:167
#, python-format
msgid "Exception in %s "
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:66
+#: nova/virt/vmwareapi/vmops.py:67
msgid "Getting list of instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:82
+#: nova/virt/vmwareapi/vmops.py:83
#, python-format
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:126
+#: nova/virt/vmwareapi/vmops.py:127
msgid "Couldn't get a local Datastore reference"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:196
+#: nova/virt/vmwareapi/vmops.py:197
msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:204
+#: nova/virt/vmwareapi/vmops.py:205
msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:232
+#: nova/virt/vmwareapi/vmops.py:233
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:250
+#: nova/virt/vmwareapi/vmops.py:251
#, python-format
msgid ""
"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
"local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:260
+#: nova/virt/vmwareapi/vmops.py:261
#, python-format
msgid ""
"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:273
+#: nova/virt/vmwareapi/vmops.py:274
#, python-format
msgid ""
"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:285
+#: nova/virt/vmwareapi/vmops.py:286
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:301
+#: nova/virt/vmwareapi/vmops.py:302
#, python-format
msgid ""
"Downloaded image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:319
+#: nova/virt/vmwareapi/vmops.py:320
msgid "Reconfiguring VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:326
+#: nova/virt/vmwareapi/vmops.py:327
msgid "Reconfigured VM instance to attach the image disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:333
+#: nova/virt/vmwareapi/vmops.py:334
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:339
+#: nova/virt/vmwareapi/vmops.py:340
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:385
+#: nova/virt/vmwareapi/vmops.py:386
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:395
+#: nova/virt/vmwareapi/vmops.py:396
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:438
+#: nova/virt/vmwareapi/vmops.py:439
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:451
+#: nova/virt/vmwareapi/vmops.py:452
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:460
+#: nova/virt/vmwareapi/vmops.py:461
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:474
+#: nova/virt/vmwareapi/vmops.py:475
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:485
+#: nova/virt/vmwareapi/vmops.py:486
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:494
+#: nova/virt/vmwareapi/vmops.py:495
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:526
+#: nova/virt/vmwareapi/vmops.py:527
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:533
+#: nova/virt/vmwareapi/vmops.py:534
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:536
+#: nova/virt/vmwareapi/vmops.py:537
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:538
+#: nova/virt/vmwareapi/vmops.py:539
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:542
+#: nova/virt/vmwareapi/vmops.py:543
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:554
+#: nova/virt/vmwareapi/vmops.py:555
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:573
+#: nova/virt/vmwareapi/vmops.py:574
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:578
+#: nova/virt/vmwareapi/vmops.py:579
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:582
+#: nova/virt/vmwareapi/vmops.py:583
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:585
+#: nova/virt/vmwareapi/vmops.py:586
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:587
+#: nova/virt/vmwareapi/vmops.py:588
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:599
+#: nova/virt/vmwareapi/vmops.py:600
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:609
+#: nova/virt/vmwareapi/vmops.py:610
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:614
+#: nova/virt/vmwareapi/vmops.py:615
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:623
+#: nova/virt/vmwareapi/vmops.py:624
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:627
+#: nova/virt/vmwareapi/vmops.py:628
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:641
+#: nova/virt/vmwareapi/vmops.py:642
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:645
+#: nova/virt/vmwareapi/vmops.py:646
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:648
+#: nova/virt/vmwareapi/vmops.py:649
msgid "instance is powered off and can not be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:651
+#: nova/virt/vmwareapi/vmops.py:652
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:664
+#: nova/virt/vmwareapi/vmops.py:665
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:669
+#: nova/virt/vmwareapi/vmops.py:670
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:671
+#: nova/virt/vmwareapi/vmops.py:672
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:707
+#: nova/virt/vmwareapi/vmops.py:708
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:765
+#: nova/virt/vmwareapi/vmops.py:766
#, python-format
msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:773
+#: nova/virt/vmwareapi/vmops.py:774
#, python-format
msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:810
+#: nova/virt/vmwareapi/vmops.py:811
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:814
+#: nova/virt/vmwareapi/vmops.py:815
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -7339,103 +7233,103 @@ msgstr ""
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:85 nova/virt/xenapi/vmops.py:1491
+#: nova/virt/xenapi/agent.py:86 nova/virt/xenapi/vmops.py:1494
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1495
+#: nova/virt/xenapi/agent.py:90 nova/virt/xenapi/vmops.py:1498
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:94 nova/virt/xenapi/vmops.py:1500
+#: nova/virt/xenapi/agent.py:95 nova/virt/xenapi/vmops.py:1503
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:104
+#: nova/virt/xenapi/agent.py:105
#, python-format
msgid ""
"The agent call to %(method)s returned an invalid response: %(ret)r. "
"path=%(path)s; args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:114
+#: nova/virt/xenapi/agent.py:115
#, python-format
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:132
+#: nova/virt/xenapi/agent.py:133
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:146
+#: nova/virt/xenapi/agent.py:147
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:154
+#: nova/virt/xenapi/agent.py:155
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:162
+#: nova/virt/xenapi/agent.py:163
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:176
+#: nova/virt/xenapi/agent.py:177
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:187
+#: nova/virt/xenapi/agent.py:188
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:207
+#: nova/virt/xenapi/agent.py:208
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:214
+#: nova/virt/xenapi/agent.py:215
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:227
+#: nova/virt/xenapi/agent.py:228
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:234
+#: nova/virt/xenapi/agent.py:235
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:240
+#: nova/virt/xenapi/agent.py:241
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:263
+#: nova/virt/xenapi/agent.py:264
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not be manipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:271
+#: nova/virt/xenapi/agent.py:272
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:275
+#: nova/virt/xenapi/agent.py:276
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:327
+#: nova/virt/xenapi/agent.py:328
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
@@ -7456,77 +7350,82 @@ msgstr ""
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:571
+#: nova/virt/xenapi/driver.py:572
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:623
+#: nova/virt/xenapi/driver.py:624
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:661
+#: nova/virt/xenapi/driver.py:664
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:745 nova/virt/xenapi/driver.py:759
+#: nova/virt/xenapi/driver.py:748 nova/virt/xenapi/driver.py:762
#, python-format
msgid "Got exception: %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:670 nova/virt/xenapi/fake.py:772
-#: nova/virt/xenapi/fake.py:791 nova/virt/xenapi/fake.py:859
+#: nova/virt/xenapi/fake.py:672 nova/virt/xenapi/fake.py:774
+#: nova/virt/xenapi/fake.py:793 nova/virt/xenapi/fake.py:861
msgid "Raising NotImplemented"
msgstr ""
-#: nova/virt/xenapi/fake.py:672
+#: nova/virt/xenapi/fake.py:674
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:706
+#: nova/virt/xenapi/fake.py:708
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr ""
-#: nova/virt/xenapi/fake.py:711
+#: nova/virt/xenapi/fake.py:713
#, python-format
msgid "Calling getter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:714
+#: nova/virt/xenapi/fake.py:716
#, python-format
msgid "Calling setter %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:774
+#: nova/virt/xenapi/fake.py:776
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
"with the wrong number of arguments"
msgstr ""
-#: nova/virt/xenapi/host.py:71
+#: nova/virt/xenapi/host.py:70
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
msgstr ""
-#: nova/virt/xenapi/host.py:157
+#: nova/virt/xenapi/host.py:82
+#, python-format
+msgid "Aggregate for host %(host)s could not be found."
+msgstr ""
+
+#: nova/virt/xenapi/host.py:164
#, python-format
msgid "Unable to get SR for this host: %s"
msgstr ""
-#: nova/virt/xenapi/host.py:191
+#: nova/virt/xenapi/host.py:199
#, python-format
msgid "Failed to extract instance support from %s"
msgstr ""
-#: nova/virt/xenapi/host.py:208
+#: nova/virt/xenapi/host.py:216
msgid "Unable to get updated status"
msgstr ""
-#: nova/virt/xenapi/host.py:211
+#: nova/virt/xenapi/host.py:219
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr ""
@@ -7546,768 +7445,776 @@ msgstr ""
msgid "Found no network for bridge %s"
msgstr ""
-#: nova/virt/xenapi/pool.py:75
+#: nova/virt/xenapi/pool.py:78
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
"%(host)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:166
+#: nova/virt/xenapi/pool.py:173
#, python-format
msgid "Unable to eject %(host)s from the pool; pool not empty"
msgstr ""
-#: nova/virt/xenapi/pool.py:182
+#: nova/virt/xenapi/pool.py:190
#, python-format
msgid "Unable to eject %(host)s from the pool; No master found"
msgstr ""
-#: nova/virt/xenapi/pool.py:199
+#: nova/virt/xenapi/pool.py:207
#, python-format
msgid "Pool-Join failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:202
+#: nova/virt/xenapi/pool.py:210
#, python-format
msgid "Unable to join %(host)s in the pool"
msgstr ""
-#: nova/virt/xenapi/pool.py:218
+#: nova/virt/xenapi/pool.py:226
#, python-format
msgid "Pool-eject failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/pool.py:230
+#: nova/virt/xenapi/pool.py:238
#, python-format
msgid "Unable to set up pool: %(e)s."
msgstr ""
-#: nova/virt/xenapi/pool.py:241
+#: nova/virt/xenapi/pool.py:249
#, python-format
msgid "Pool-set_name_label failed: %(e)s"
msgstr ""
-#: nova/virt/xenapi/vif.py:104
+#: nova/virt/xenapi/vif.py:105
#, python-format
msgid "Found no PIF for device %s"
msgstr ""
-#: nova/virt/xenapi/vif.py:123
+#: nova/virt/xenapi/vif.py:124
#, python-format
msgid ""
"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
"Expected %(vlan_num)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:257
+#: nova/virt/xenapi/vm_utils.py:264
msgid "Created VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:269
+#: nova/virt/xenapi/vm_utils.py:276
msgid "VM destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:276
+#: nova/virt/xenapi/vm_utils.py:281 nova/virt/xenapi/vm_utils.py:296
msgid "VM already halted, skipping shutdown..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:280
-msgid "Shutting down VM"
+#: nova/virt/xenapi/vm_utils.py:285
+msgid "Shutting down VM (cleanly)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:312
+#: nova/virt/xenapi/vm_utils.py:300
+msgid "Shutting down VM (hard)"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:339
#, python-format
msgid "VBD not found in instance %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:329
+#: nova/virt/xenapi/vm_utils.py:356
#, python-format
msgid "VBD %s already detached"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:332
+#: nova/virt/xenapi/vm_utils.py:359
#, python-format
msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:337
+#: nova/virt/xenapi/vm_utils.py:364
#, python-format
msgid "Unable to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:342
+#: nova/virt/xenapi/vm_utils.py:369
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:353
+#: nova/virt/xenapi/vm_utils.py:380
#, python-format
msgid "Unable to destroy VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:372
+#: nova/virt/xenapi/vm_utils.py:399
#, python-format
msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:375
+#: nova/virt/xenapi/vm_utils.py:402
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:391
+#: nova/virt/xenapi/vm_utils.py:418
#, python-format
msgid "Unable to destroy VDI %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:423
+#: nova/virt/xenapi/vm_utils.py:450
#, python-format
msgid ""
"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
" on %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:454
+#: nova/virt/xenapi/vm_utils.py:481
msgid "SR not present and could not be introduced"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:555
+#: nova/virt/xenapi/vm_utils.py:582
#, python-format
msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:575
+#: nova/virt/xenapi/vm_utils.py:602
#, python-format
msgid "No primary VDI found for %(vm_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:584
+#: nova/virt/xenapi/vm_utils.py:611
msgid "Starting snapshot for VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:632
+#: nova/virt/xenapi/vm_utils.py:659
#, python-format
msgid "Destroying cached VDI '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:690
+#: nova/virt/xenapi/vm_utils.py:717
#, python-format
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:876
+#: nova/virt/xenapi/vm_utils.py:903
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:934
+#: nova/virt/xenapi/vm_utils.py:959
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:968
+#: nova/virt/xenapi/vm_utils.py:993
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:980
+#: nova/virt/xenapi/vm_utils.py:1005
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:993
+#: nova/virt/xenapi/vm_utils.py:1018
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1029
+#: nova/virt/xenapi/vm_utils.py:1052
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1040
+#: nova/virt/xenapi/vm_utils.py:1063
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1104
+#: nova/virt/xenapi/vm_utils.py:1127
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1120
+#: nova/virt/xenapi/vm_utils.py:1143
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1124
+#: nova/virt/xenapi/vm_utils.py:1147
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1146
+#: nova/virt/xenapi/vm_utils.py:1169
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1159
+#: nova/virt/xenapi/vm_utils.py:1182
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1168
+#: nova/virt/xenapi/vm_utils.py:1191
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1187
+#: nova/virt/xenapi/vm_utils.py:1210
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1201
+#: nova/virt/xenapi/vm_utils.py:1224
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1210
+#: nova/virt/xenapi/vm_utils.py:1233
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1248
+#: nova/virt/xenapi/vm_utils.py:1271
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1269
+#: nova/virt/xenapi/vm_utils.py:1292
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1287
+#: nova/virt/xenapi/vm_utils.py:1310
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1318
+#: nova/virt/xenapi/vm_utils.py:1341
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1414
+#: nova/virt/xenapi/vm_utils.py:1437
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1441
+#: nova/virt/xenapi/vm_utils.py:1464
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1469
+#: nova/virt/xenapi/vm_utils.py:1492
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1487
+#: nova/virt/xenapi/vm_utils.py:1510
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1500
+#: nova/virt/xenapi/vm_utils.py:1523
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1508
+#: nova/virt/xenapi/vm_utils.py:1531
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1510
+#: nova/virt/xenapi/vm_utils.py:1533
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1513
+#: nova/virt/xenapi/vm_utils.py:1536
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1516
+#: nova/virt/xenapi/vm_utils.py:1539
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1520
+#: nova/virt/xenapi/vm_utils.py:1543
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1522
+#: nova/virt/xenapi/vm_utils.py:1545
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1525
+#: nova/virt/xenapi/vm_utils.py:1548
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1528
+#: nova/virt/xenapi/vm_utils.py:1551
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1531
+#: nova/virt/xenapi/vm_utils.py:1554
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1553
+#: nova/virt/xenapi/vm_utils.py:1576
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1569
+#: nova/virt/xenapi/vm_utils.py:1592
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1623
+#: nova/virt/xenapi/vm_utils.py:1646
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1683
+#: nova/virt/xenapi/vm_utils.py:1706
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1770
+#: nova/virt/xenapi/vm_utils.py:1793
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1780
+#: nova/virt/xenapi/vm_utils.py:1803
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1815
+#: nova/virt/xenapi/vm_utils.py:1838
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1835
+#: nova/virt/xenapi/vm_utils.py:1858
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1848
+#: nova/virt/xenapi/vm_utils.py:1871
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1851
+#: nova/virt/xenapi/vm_utils.py:1874
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1853
+#: nova/virt/xenapi/vm_utils.py:1876
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1856
+#: nova/virt/xenapi/vm_utils.py:1879
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1861
+#: nova/virt/xenapi/vm_utils.py:1884
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1869
+#: nova/virt/xenapi/vm_utils.py:1892
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1882
+#: nova/virt/xenapi/vm_utils.py:1905
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1889
+#: nova/virt/xenapi/vm_utils.py:1912
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1891
+#: nova/virt/xenapi/vm_utils.py:1914
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1904
+#: nova/virt/xenapi/vm_utils.py:1927
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1910
+#: nova/virt/xenapi/vm_utils.py:1933
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1935
+#: nova/virt/xenapi/vm_utils.py:1958
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1948
+#: nova/virt/xenapi/vm_utils.py:1971
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2002
+#: nova/virt/xenapi/vm_utils.py:2025
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2034
+#: nova/virt/xenapi/vm_utils.py:2057
#, python-format
msgid ""
"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
"reduction in size"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2083
+#: nova/virt/xenapi/vm_utils.py:2106
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2092
+#: nova/virt/xenapi/vm_utils.py:2115
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2204
+#: nova/virt/xenapi/vm_utils.py:2227
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:128 nova/virt/xenapi/vmops.py:674
+#: nova/virt/xenapi/vmops.py:125 nova/virt/xenapi/vmops.py:673
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:169
+#: nova/virt/xenapi/vmops.py:167
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:237
+#: nova/virt/xenapi/vmops.py:235
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:305
+#: nova/virt/xenapi/vmops.py:304
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:377
+#: nova/virt/xenapi/vmops.py:376
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:408
+#: nova/virt/xenapi/vmops.py:407
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:481
+#: nova/virt/xenapi/vmops.py:480
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:498
+#: nova/virt/xenapi/vmops.py:497
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:524
+#: nova/virt/xenapi/vmops.py:523
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:530
+#: nova/virt/xenapi/vmops.py:529
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:544
+#: nova/virt/xenapi/vmops.py:543
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:547
+#: nova/virt/xenapi/vmops.py:546
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:558
+#: nova/virt/xenapi/vmops.py:557
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:585
+#: nova/virt/xenapi/vmops.py:584
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:593
+#: nova/virt/xenapi/vmops.py:592
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:643
+#: nova/virt/xenapi/vmops.py:642
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:647
+#: nova/virt/xenapi/vmops.py:646
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:655
+#: nova/virt/xenapi/vmops.py:654
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:692
+#: nova/virt/xenapi/vmops.py:691
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:816
+#: nova/virt/xenapi/vmops.py:697 nova/virt/xenapi/vmops.py:747
+msgid "Clean shutdown did not complete successfully, trying hard shutdown."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:819
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:821
+#: nova/virt/xenapi/vmops.py:824
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:865
+#: nova/virt/xenapi/vmops.py:868
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:956
+#: nova/virt/xenapi/vmops.py:959
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:982
+#: nova/virt/xenapi/vmops.py:985
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1009
+#: nova/virt/xenapi/vmops.py:1012
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1016
+#: nova/virt/xenapi/vmops.py:1019
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1023
+#: nova/virt/xenapi/vmops.py:1026
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1049
+#: nova/virt/xenapi/vmops.py:1052
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1075
+#: nova/virt/xenapi/vmops.py:1078
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1126
+#: nova/virt/xenapi/vmops.py:1129
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1160
+#: nova/virt/xenapi/vmops.py:1163
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1209
+#: nova/virt/xenapi/vmops.py:1212
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1213
+#: nova/virt/xenapi/vmops.py:1216
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1312
+#: nova/virt/xenapi/vmops.py:1315
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1395
+#: nova/virt/xenapi/vmops.py:1398
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1414
+#: nova/virt/xenapi/vmops.py:1417
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1423
+#: nova/virt/xenapi/vmops.py:1426
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1426
+#: nova/virt/xenapi/vmops.py:1429
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1454
+#: nova/virt/xenapi/vmops.py:1457
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1550
+#: nova/virt/xenapi/vmops.py:1553
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1582
+#: nova/virt/xenapi/vmops.py:1585
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1630
+#: nova/virt/xenapi/vmops.py:1633
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1666
+#: nova/virt/xenapi/vmops.py:1669
msgid "Migrate Send failed"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:41
+#: nova/virt/xenapi/volume_utils.py:42
msgid "creating sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:44 nova/virt/xenapi/volume_utils.py:72
+#: nova/virt/xenapi/volume_utils.py:45 nova/virt/xenapi/volume_utils.py:73
#, python-format
msgid "type is = %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:47 nova/virt/xenapi/volume_utils.py:75
+#: nova/virt/xenapi/volume_utils.py:48 nova/virt/xenapi/volume_utils.py:76
#, python-format
msgid "name = %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:60
+#: nova/virt/xenapi/volume_utils.py:61
#, python-format
msgid "Created %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:65 nova/virt/xenapi/volume_utils.py:163
+#: nova/virt/xenapi/volume_utils.py:66 nova/virt/xenapi/volume_utils.py:164
msgid "Unable to create Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:69
+#: nova/virt/xenapi/volume_utils.py:70
msgid "introducing sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:92 nova/virt/xenapi/volume_utils.py:159
+#: nova/virt/xenapi/volume_utils.py:93 nova/virt/xenapi/volume_utils.py:160
#: nova/virt/xenapi/volumeops.py:150
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:95
+#: nova/virt/xenapi/volume_utils.py:96
msgid "Creating pbd for SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:97
+#: nova/virt/xenapi/volume_utils.py:98
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:105 nova/virt/xenapi/volumeops.py:154
+#: nova/virt/xenapi/volume_utils.py:106 nova/virt/xenapi/volumeops.py:154
msgid "Unable to introduce Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:116 nova/virt/xenapi/volumeops.py:46
+#: nova/virt/xenapi/volume_utils.py:117 nova/virt/xenapi/volumeops.py:46
msgid "Unable to get SR using uuid"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:118
+#: nova/virt/xenapi/volume_utils.py:119
#, python-format
msgid "Forgetting SR %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:126
+#: nova/virt/xenapi/volume_utils.py:127
msgid "Unable to forget Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:146
+#: nova/virt/xenapi/volume_utils.py:147
#, python-format
msgid "Introducing %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:175
+#: nova/virt/xenapi/volume_utils.py:176
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:193
+#: nova/virt/xenapi/volume_utils.py:194
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:199
+#: nova/virt/xenapi/volume_utils.py:200
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:223
+#: nova/virt/xenapi/volume_utils.py:224
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:231
+#: nova/virt/xenapi/volume_utils.py:232
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:253
+#: nova/virt/xenapi/volume_utils.py:254
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:263
+#: nova/virt/xenapi/volume_utils.py:264
#, python-format
msgid "Error finding vdis in SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:270
+#: nova/virt/xenapi/volume_utils.py:271
#, python-format
msgid "Unable to find vbd for vdi %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:281
+#: nova/virt/xenapi/volume_utils.py:282
#, python-format
msgid "Unable to obtain target information %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:309
+#: nova/virt/xenapi/volume_utils.py:310
#, python-format
msgid "Unable to obtain target information %(connection_data)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:335
+#: nova/virt/xenapi/volume_utils.py:336
#, python-format
msgid "Mountpoint cannot be translated: %s"
msgstr ""
@@ -8401,169 +8308,43 @@ msgstr ""
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:96 nova/vnc/xvp_proxy.py:101
+#: nova/vnc/xvp_proxy.py:97 nova/vnc/xvp_proxy.py:102
#, python-format
msgid "Error in handshake: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:117
+#: nova/vnc/xvp_proxy.py:118
#, python-format
msgid "Invalid request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:137
+#: nova/vnc/xvp_proxy.py:138
#, python-format
msgid "Request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:140
+#: nova/vnc/xvp_proxy.py:141
#, python-format
msgid "Request made with missing token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:150
+#: nova/vnc/xvp_proxy.py:151
#, python-format
msgid "Request made with invalid token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:157
+#: nova/vnc/xvp_proxy.py:158
#, python-format
msgid "Unexpected error: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:177
+#: nova/vnc/xvp_proxy.py:178
#, python-format
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/cinder.py:68
+#: nova/volume/cinder.py:69
#, python-format
msgid "Cinderclient connection created using URL: %s"
msgstr ""
-#: nova/volume/driver.py:103
-#, python-format
-msgid "Recovering from a failed execute. Try number %s"
-msgstr ""
-
-#: nova/volume/driver.py:113
-#, python-format
-msgid "volume group %s doesn't exist"
-msgstr ""
-
-#: nova/volume/driver.py:324
-#, python-format
-msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
-msgstr ""
-
-#: nova/volume/driver.py:397
-#, python-format
-msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
-msgstr ""
-
-#: nova/volume/driver.py:414
-#, python-format
-msgid ""
-"Skipping remove_export. No iscsi_target is presently exported for volume:"
-" %s"
-msgstr ""
-
-#: nova/volume/driver.py:423
-msgid "ISCSI provider_location not stored, using discovery"
-msgstr ""
-
-#: nova/volume/driver.py:470
-#, python-format
-msgid "Could not find iSCSI export for volume %s"
-msgstr ""
-
-#: nova/volume/driver.py:474
-#, python-format
-msgid "ISCSI Discovery: Found %s"
-msgstr ""
-
-#: nova/volume/driver.py:565
-#, python-format
-msgid "Cannot confirm exported volume id:%(volume_id)s."
-msgstr ""
-
-#: nova/volume/driver.py:606
-#, python-format
-msgid "FAKE ISCSI: %s"
-msgstr ""
-
-#: nova/volume/driver.py:618
-#, python-format
-msgid "rbd has no pool %s"
-msgstr ""
-
-#: nova/volume/driver.py:740
-#, python-format
-msgid "Image %s is not stored in rbd"
-msgstr ""
-
-#: nova/volume/driver.py:744
-#, python-format
-msgid "Image %s has blank components"
-msgstr ""
-
-#: nova/volume/driver.py:747
-#, python-format
-msgid "Image %s is not an rbd snapshot"
-msgstr ""
-
-#: nova/volume/driver.py:762
-#, python-format
-msgid "%s is in a different ceph cluster"
-msgstr ""
-
-#: nova/volume/driver.py:773
-#, python-format
-msgid "Unable to read image %s"
-msgstr ""
-
-#: nova/volume/driver.py:815
-#, python-format
-msgid "Sheepdog is not working: %s"
-msgstr ""
-
-#: nova/volume/driver.py:820
-msgid "Sheepdog is not working"
-msgstr ""
-
-#: nova/volume/driver.py:924 nova/volume/driver.py:929
-#, python-format
-msgid "LoggingVolumeDriver: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:122
-#, python-format
-msgid "Creating volume: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:136
-#, python-format
-msgid "Failed to create iscsi target for volume id:%(vol_id)s."
-msgstr ""
-
-#: nova/volume/iscsi.py:146
-#, python-format
-msgid ""
-"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
-"your tgtd config file contains 'include %(volumes_dir)s/*'"
-msgstr ""
-
-#: nova/volume/iscsi.py:154 nova/volume/iscsi.py:196
-#, python-format
-msgid "Removing volume: %s"
-msgstr ""
-
-#: nova/volume/iscsi.py:168
-#, python-format
-msgid "Failed to create iscsi target for volume id:%(volume_id)s."
-msgstr ""
-
-#: nova/volume/iscsi.py:177
-msgid "valid iqn needed for show_target"
-msgstr ""
-
diff --git a/nova/manager.py b/nova/manager.py
index 22a42d2d3..8beae2732 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -57,7 +57,6 @@ import eventlet
from nova import config
from nova.db import base
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
@@ -140,6 +139,7 @@ class Manager(base.Base):
host = CONF.host
self.host = host
self.load_plugins()
+ self.backdoor_port = None
super(Manager, self).__init__(db_driver)
def load_plugins(self):
@@ -240,6 +240,8 @@ class SchedulerDependentManager(Manager):
def update_service_capabilities(self, capabilities):
"""Remember these capabilities to send on next periodic update."""
+ if not isinstance(capabilities, list):
+ capabilities = [capabilities]
self.last_capabilities = capabilities
@periodic_task
@@ -251,5 +253,8 @@ class SchedulerDependentManager(Manager):
"""
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
- self.scheduler_rpcapi.update_service_capabilities(context,
- self.service_name, self.host, self.last_capabilities)
+ for capability_item in self.last_capabilities:
+ self.scheduler_rpcapi.update_service_capabilities(context,
+ self.service_name, self.host, capability_item)
+ # TODO(NTTdocomo): Make update_service_capabilities() accept a list
+ # of capabilities
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index 221130a91..79955e1eb 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -19,7 +19,6 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.network import <foo>' elsewhere.
import nova.config
-import nova.flags
import nova.openstack.common.importutils
diff --git a/nova/network/api.py b/nova/network/api.py
index 46a7e0360..049b8c7c0 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -20,9 +20,9 @@
import functools
import inspect
+from nova import config
from nova.db import base
from nova import exception
-from nova import flags
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import log as logging
@@ -124,6 +124,9 @@ class API(base.Base):
return self.network_rpcapi.get_floating_ips_by_fixed_address(context,
fixed_address)
+ def get_backdoor_port(self, context):
+ return self.network_rpcapi.get_backdoor_port(context)
+
def get_instance_id_by_floating_address(self, context, address):
# NOTE(tr3buchet): i hate this
return self.network_rpcapi.get_instance_id_by_floating_address(context,
@@ -329,7 +332,7 @@ class API(base.Base):
except exception.FixedIpNotFoundForInstance:
return False
network = self.db.network_get(context, fixed_ips[0]['network_id'],
- project_only=True)
+ project_only='allow_none')
return network['multi_host']
def _get_floating_ip_addresses(self, context, instance):
@@ -351,7 +354,7 @@ class API(base.Base):
if self._is_multi_host(context, instance):
args['floating_addresses'] = \
self._get_floating_ip_addresses(context, instance)
- args['host'] = migration['dest_compute']
+ args['host'] = migration['source_compute']
self.network_rpcapi.migrate_instance_start(context, **args)
diff --git a/nova/network/l3.py b/nova/network/l3.py
index 6c16dbeb6..e937245e7 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
+from nova import config
from nova.network import linux_net
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index 661c3ad56..15b6ca235 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -17,7 +17,6 @@ import time
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 994a3c0f4..c57e9a730 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -27,7 +27,6 @@ import os
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
@@ -756,6 +755,7 @@ def get_dhcp_opts(context, network_ref):
default_gw_vif[instance_uuid] = vifs[0]['id']
for datum in data:
+ instance_uuid = datum['instance_uuid']
if instance_uuid in default_gw_vif:
# we don't want default gateway for this fixed ip
if default_gw_vif[instance_uuid] != datum['vif_id']:
@@ -1146,11 +1146,14 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@classmethod
@lockutils.synchronized('ensure_bridge', 'nova-', external=True)
- def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
+ def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True,
+ filtering=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
+ :param gateway: whether or not the bridge is a gateway.
+ :param filtering: whether or not to create filters on the bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
@@ -1161,7 +1164,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
"""
if not _device_exists(bridge):
- LOG.debug(_('Starting Bridge interface for %s'), interface)
+ LOG.debug(_('Starting Bridge %s'), bridge)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
@@ -1173,6 +1176,8 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
+ msg = _('Adding interface %(interface)s to bridge %(bridge)s')
+ LOG.debug(msg % locals())
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
@@ -1207,18 +1212,19 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
- # Don't forward traffic unless we were told to be a gateway
- ipv4_filter = iptables_manager.ipv4['filter']
- if gateway:
- ipv4_filter.add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- ipv4_filter.add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
- else:
- ipv4_filter.add_rule('FORWARD',
- '--in-interface %s -j DROP' % bridge)
- ipv4_filter.add_rule('FORWARD',
- '--out-interface %s -j DROP' % bridge)
+ if filtering:
+ # Don't forward traffic unless we were told to be a gateway
+ ipv4_filter = iptables_manager.ipv4['filter']
+ if gateway:
+ ipv4_filter.add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % bridge)
+ ipv4_filter.add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % bridge)
+ else:
+ ipv4_filter.add_rule('FORWARD',
+ '--in-interface %s -j DROP' % bridge)
+ ipv4_filter.add_rule('FORWARD',
+ '--out-interface %s -j DROP' % bridge)
# plugs interfaces using Open vSwitch
diff --git a/nova/network/manager.py b/nova/network/manager.py
index d032fe159..f54764d8f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -49,6 +49,7 @@ import itertools
import math
import re
import socket
+import uuid
from eventlet import greenpool
import netaddr
@@ -57,7 +58,6 @@ from nova.compute import api as compute_api
from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova import ipv6
from nova import manager
from nova.network import api as network_api
@@ -664,11 +664,13 @@ class FloatingIP(object):
return False if floating_ip.get('fixed_ip_id') else True
@wrap_check_policy
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance_uuid,
+ floating_addresses,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
- if not floating_addresses or source == dest:
+ if not floating_addresses or (source and source == dest):
return
LOG.info(_("Starting migration network for instance"
@@ -697,11 +699,15 @@ class FloatingIP(object):
{'host': None})
@wrap_check_policy
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_finish(self, context, instance_uuid,
+ floating_addresses, host=None,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
- if not floating_addresses or source == dest:
+ if host and not dest:
+ dest = host
+ if not floating_addresses or (source and source == dest):
return
LOG.info(_("Finishing migration network for instance"
@@ -1263,7 +1269,7 @@ class NetworkManager(manager.SchedulerDependentManager):
vif = {'address': utils.generate_mac_address(),
'instance_uuid': instance_uuid,
'network_id': network_id,
- 'uuid': str(utils.gen_uuid())}
+ 'uuid': str(uuid.uuid4())}
# try FLAG times to create a vif record with a unique mac_address
for i in xrange(CONF.create_unique_mac_address_attempts):
try:
@@ -1284,6 +1290,10 @@ class NetworkManager(manager.SchedulerDependentManager):
network = self._get_network_by_id(context, network_id)
self._allocate_fixed_ips(context, instance_id, host, [network])
+ def get_backdoor_port(self, context):
+ """Return backdoor port for eventlet_backdoor"""
+ return self.backdoor_port
+
@wrap_check_policy
def remove_fixed_ip_from_instance(self, context, instance_id, host,
address):
@@ -1375,6 +1385,10 @@ class NetworkManager(manager.SchedulerDependentManager):
self.instance_dns_manager.delete_entry(n,
self.instance_dns_domain)
+ self.db.fixed_ip_update(context, address,
+ {'allocated': False,
+ 'virtual_interface_id': None})
+
if teardown:
network = self._get_network_by_id(context,
fixed_ip_ref['network_id'])
@@ -1401,10 +1415,6 @@ class NetworkManager(manager.SchedulerDependentManager):
# callback will get called by nova-dhcpbridge.
self.driver.release_dhcp(dev, address, vif['address'])
- self.db.fixed_ip_update(context, address,
- {'allocated': False,
- 'virtual_interface_id': None})
-
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
@@ -1632,8 +1642,6 @@ class NetworkManager(manager.SchedulerDependentManager):
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
- del net['dns1']
- del net['dns2']
vlan = kwargs['vlan_start'] + index
net['vpn_private_address'] = str(subnet_v4[2])
net['dhcp_start'] = str(subnet_v4[3])
@@ -1955,12 +1963,16 @@ class FlatManager(NetworkManager):
"""Returns the floating IPs associated with a fixed_address"""
return []
- def migrate_instance_start(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_start(self, context, instance_uuid,
+ floating_addresses,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
pass
- def migrate_instance_finish(self, context, instance_uuid, rxtx_factor,
- project_id, source, dest, floating_addresses):
+ def migrate_instance_finish(self, context, instance_uuid,
+ floating_addresses, host=None,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
pass
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index 35e77f63a..37b85fb66 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -18,7 +18,6 @@ import tempfile
from nova import config
from nova import exception
-from nova import flags
CONF = config.CONF
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
index b296715c2..4e6f5dbed 100644
--- a/nova/network/quantum/nova_ipam_lib.py
+++ b/nova/network/quantum/nova_ipam_lib.py
@@ -17,9 +17,9 @@
import netaddr
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova import ipv6
from nova.network import manager
from nova.openstack.common import log as logging
diff --git a/nova/network/quantumv2/__init__.py b/nova/network/quantumv2/__init__.py
index a5fac840c..3377a32ab 100644
--- a/nova/network/quantumv2/__init__.py
+++ b/nova/network/quantumv2/__init__.py
@@ -17,7 +17,6 @@
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from quantumclient import client
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 8c2438669..38692351e 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -20,7 +20,6 @@ from nova.compute import api as compute_api
from nova import config
from nova.db import base
from nova import exception
-from nova import flags
from nova.network.api import refresh_cache
from nova.network import model as network_model
from nova.network import quantumv2
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index eba0aae52..c0742b1ca 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -19,7 +19,6 @@ Client side of the network RPC API.
"""
from nova import config
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import proxy as rpc_proxy
@@ -104,6 +103,9 @@ class NetworkAPI(rpc_proxy.RpcProxy):
'get_instance_id_by_floating_address',
address=address))
+ def get_backdoor_port(self, ctxt):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'))
+
def get_vifs_by_instance(self, ctxt, instance_id):
# NOTE(vish): When the db calls are converted to store network
# data by instance_uuid, this should pass uuid instead.
@@ -268,10 +270,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_start(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
- if host is not None:
- topic = rpc.queue_get_for(ctxt, self.topic, host)
- else:
- topic = self.topic
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
return self.call(ctxt, self.make_msg(
'migrate_instance_start',
instance_uuid=instance_uuid,
@@ -286,10 +285,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def migrate_instance_finish(self, ctxt, instance_uuid, rxtx_factor,
project_id, source_compute, dest_compute,
floating_addresses, host=None):
- if host is not None:
- topic = rpc.queue_get_for(ctxt, self.topic, host)
- else:
- topic = self.topic
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
return self.call(ctxt, self.make_msg(
'migrate_instance_finish',
instance_uuid=instance_uuid,
diff --git a/nova/notifications.py b/nova/notifications.py
index 7153933ee..d43aef3c7 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -23,7 +23,6 @@ from nova import config
import nova.context
from nova import db
from nova import exception
-from nova import flags
from nova import network
from nova.network import model as network_model
from nova.openstack.common import cfg
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index 5b4ee916c..0975ca76f 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -45,7 +45,6 @@ import routes
import webob
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova import utils
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index 36e5e0ab0..8775a5f8a 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -236,10 +236,10 @@ log files:
This module also contains a global instance of the CommonConfigOpts class
in order to support a common usage pattern in OpenStack:
- from openstack.common import cfg
+ from nova.openstack.common import cfg
opts = [
- cfg.StrOpt('bind_host' default='0.0.0.0'),
+ cfg.StrOpt('bind_host', default='0.0.0.0'),
cfg.IntOpt('bind_port', default=9292),
]
@@ -1507,7 +1507,7 @@ class ConfigOpts(collections.Mapping):
if ('default' in info or 'override' in info):
continue
- if self._get(opt.name, group) is None:
+ if self._get(opt.dest, group) is None:
raise RequiredOptError(opt.name, group)
def _parse_cli_opts(self, args):
diff --git a/nova/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index 4620d76ac..f18e84f6d 100644
--- a/nova/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 OpenStack, LLC.
+# Copyright (c) 2012 Openstack, LLC.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
@@ -25,46 +25,43 @@ import eventlet
import eventlet.backdoor
import greenlet
-from nova import config
-from nova import flags
from nova.openstack.common import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
default=None,
help='port for eventlet backdoor to listen')
- ]
+]
-CONF = config.CONF
+CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
-def dont_use_this():
+def _dont_use_this():
print "Don't use this, just disconnect instead"
-def find_objects(t):
+def _find_objects(t):
return filter(lambda o: isinstance(o, t), gc.get_objects())
-def print_greenthreads():
+def _print_greenthreads():
for i, gt in enumerate(find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
-backdoor_locals = {
- 'exit': dont_use_this, # So we don't exit the entire process
- 'quit': dont_use_this, # So we don't exit the entire process
- 'fo': find_objects,
- 'pgt': print_greenthreads,
-}
-
-
def initialize_if_enabled():
+ backdoor_locals = {
+ 'exit': _dont_use_this, # So we don't exit the entire process
+ 'quit': _dont_use_this, # So we don't exit the entire process
+ 'fo': _find_objects,
+ 'pgt': _print_greenthreads,
+ }
+
if CONF.backdoor_port is None:
- return
+ return None
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
@@ -76,6 +73,8 @@ def initialize_if_enabled():
pprint.pprint(val)
sys.displayhook = displayhook
- eventlet.spawn(eventlet.backdoor.backdoor_server,
- eventlet.listen(('localhost', CONF.backdoor_port)),
- locals=backdoor_locals)
+ sock = eventlet.listen(('localhost', CONF.backdoor_port))
+ port = sock.getsockname()[1]
+ eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+ locals=backdoor_locals)
+ return port
diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py
index 235350cc4..d52309e62 100644
--- a/nova/openstack/common/gettextutils.py
+++ b/nova/openstack/common/gettextutils.py
@@ -20,7 +20,7 @@ gettext for openstack-common modules.
Usual usage in an openstack.common module:
- from openstack.common.gettextutils import _
+ from nova.openstack.common.gettextutils import _
"""
import gettext
diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py
index 2840ce6f7..ba390dc69 100644
--- a/nova/openstack/common/lockutils.py
+++ b/nova/openstack/common/lockutils.py
@@ -24,7 +24,6 @@ import tempfile
import time
import weakref
-from eventlet import greenthread
from eventlet import semaphore
from nova.openstack.common import cfg
diff --git a/nova/openstack/common/notifier/rabbit_notifier.py b/nova/openstack/common/notifier/rabbit_notifier.py
index c7b3f54fe..11067fb0a 100644
--- a/nova/openstack/common/notifier/rabbit_notifier.py
+++ b/nova/openstack/common/notifier/rabbit_notifier.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,33 +14,16 @@
# under the License.
-from nova.openstack.common import cfg
-from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
+from nova.openstack.common.notifier import rpc_notifier
LOG = logging.getLogger(__name__)
-notification_topic_opt = cfg.ListOpt(
- 'notification_topics', default=['notifications', ],
- help='AMQP topic used for openstack notifications')
-
-CONF = cfg.CONF
-CONF.register_opt(notification_topic_opt)
-
def notify(context, message):
- """Sends a notification to the RabbitMQ"""
- if not context:
- context = req_context.get_admin_context()
- priority = message.get('priority',
- CONF.default_notification_level)
- priority = priority.lower()
- for topic in CONF.notification_topics:
- topic = '%s.%s' % (topic, priority)
- try:
- rpc.notify(context, topic, message)
- except Exception, e:
- LOG.exception(_("Could not send notification to %(topic)s. "
- "Payload=%(message)s"), locals())
+ """Deprecated in Grizzly. Please use rpc_notifier instead."""
+
+ LOG.deprecated(_("The rabbit_notifier is now deprecated."
+ " Please use rpc_notifier instead."))
+ rpc_notifier.notify(context, message)
diff --git a/nova/openstack/common/notifier/rpc_notifier.py b/nova/openstack/common/notifier/rpc_notifier.py
new file mode 100644
index 000000000..aa9e8860e
--- /dev/null
+++ b/nova/openstack/common/notifier/rpc_notifier.py
@@ -0,0 +1,46 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.openstack.common import cfg
+from nova.openstack.common import context as req_context
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+ 'notification_topics', default=['notifications', ],
+ help='AMQP topic used for openstack notifications')
+
+CONF = cfg.CONF
+CONF.register_opt(notification_topic_opt)
+
+
+def notify(context, message):
+ """Sends a notification via RPC"""
+ if not context:
+ context = req_context.get_admin_context()
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ for topic in CONF.notification_topics:
+ topic = '%s.%s' % (topic, priority)
+ try:
+ rpc.notify(context, topic, message)
+ except Exception, e:
+ LOG.exception(_("Could not send notification to %(topic)s. "
+ "Payload=%(message)s"), locals())
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index 0f82c47a2..bf2b2e9e0 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -250,7 +250,7 @@ def queue_get_for(context, topic, host):
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
- return '%s.%s' % (topic, host)
+ return '%s.%s' % (topic, host) if host else topic
_RPCIMPL = None
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 7b16e0e19..bb0ade27c 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -31,9 +31,9 @@ import kombu.messaging
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import network_utils
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
-from nova.openstack.common import network_utils
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
@@ -267,6 +267,7 @@ class FanoutConsumer(ConsumerBase):
# Default options
options = {'durable': False,
+ 'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': True}
options.update(kwargs)
@@ -408,18 +409,18 @@ class Connection(object):
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
- params = {}
+ params = {
+ 'hostname': hostname,
+ 'port': port,
+ 'userid': self.conf.rabbit_userid,
+ 'password': self.conf.rabbit_password,
+ 'virtual_host': self.conf.rabbit_virtual_host,
+ }
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
- params.setdefault('hostname', hostname)
- params.setdefault('port', port)
- params.setdefault('userid', self.conf.rabbit_userid)
- params.setdefault('password', self.conf.rabbit_password)
- params.setdefault('virtual_host', self.conf.rabbit_virtual_host)
-
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
@@ -776,7 +777,7 @@ def cast_to_server(conf, context, server_params, topic, msg):
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
- return rpc_amqp.cast_to_server(
+ return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 70a03c5bf..b87050753 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -50,24 +50,6 @@ qpid_opts = [
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
- cfg.BoolOpt('qpid_reconnect',
- default=True,
- help='Automatically reconnect'),
- cfg.IntOpt('qpid_reconnect_timeout',
- default=0,
- help='Reconnection timeout in seconds'),
- cfg.IntOpt('qpid_reconnect_limit',
- default=0,
- help='Max reconnections before giving up'),
- cfg.IntOpt('qpid_reconnect_interval_min',
- default=0,
- help='Minimum seconds between reconnection attempts'),
- cfg.IntOpt('qpid_reconnect_interval_max',
- default=0,
- help='Maximum seconds between reconnection attempts'),
- cfg.IntOpt('qpid_reconnect_interval',
- default=0,
- help='Equivalent to setting max and min to the same value'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
@@ -294,50 +276,36 @@ class Connection(object):
self.consumer_thread = None
self.conf = conf
- if server_params is None:
- server_params = {}
-
- default_params = dict(hostname=self.conf.qpid_hostname,
- port=self.conf.qpid_port,
- username=self.conf.qpid_username,
- password=self.conf.qpid_password)
-
- params = server_params
- for key in default_params.keys():
- params.setdefault(key, default_params[key])
+ params = {
+ 'hostname': self.conf.qpid_hostname,
+ 'port': self.conf.qpid_port,
+ 'username': self.conf.qpid_username,
+ 'password': self.conf.qpid_password,
+ }
+ params.update(server_params or {})
self.broker = params['hostname'] + ":" + str(params['port'])
+ self.username = params['username']
+ self.password = params['password']
+ self.connection_create()
+ self.reconnect()
+
+ def connection_create(self):
# Create the connection - this does not open the connection
self.connection = qpid.messaging.Connection(self.broker)
# Check if flags are set and if so set them for the connection
# before we call open
- self.connection.username = params['username']
- self.connection.password = params['password']
+ self.connection.username = self.username
+ self.connection.password = self.password
+
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
- self.connection.reconnect = self.conf.qpid_reconnect
- if self.conf.qpid_reconnect_timeout:
- self.connection.reconnect_timeout = (
- self.conf.qpid_reconnect_timeout)
- if self.conf.qpid_reconnect_limit:
- self.connection.reconnect_limit = self.conf.qpid_reconnect_limit
- if self.conf.qpid_reconnect_interval_max:
- self.connection.reconnect_interval_max = (
- self.conf.qpid_reconnect_interval_max)
- if self.conf.qpid_reconnect_interval_min:
- self.connection.reconnect_interval_min = (
- self.conf.qpid_reconnect_interval_min)
- if self.conf.qpid_reconnect_interval:
- self.connection.reconnect_interval = (
- self.conf.qpid_reconnect_interval)
+ # Reconnection is done by self.reconnect()
+ self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.protocol = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
- # Open is part of reconnect -
- # NOTE(WGH) not sure we need this with the reconnect flags
- self.reconnect()
-
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
@@ -352,12 +320,18 @@ class Connection(object):
except qpid.messaging.exceptions.ConnectionError:
pass
+ delay = 1
while True:
try:
+ self.connection_create()
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
- LOG.error(_('Unable to connect to AMQP server: %s'), e)
- time.sleep(self.conf.qpid_reconnect_interval or 1)
+ msg_dict = dict(e=e, delay=delay)
+ msg = _("Unable to connect to AMQP server: %(e)s. "
+ "Sleeping %(delay)s seconds") % msg_dict
+ LOG.error(msg)
+ time.sleep(delay)
+ delay = min(2 * delay, 60)
else:
break
@@ -365,10 +339,14 @@ class Connection(object):
self.session = self.connection.session()
- for consumer in self.consumers.itervalues():
- consumer.reconnect(self.session)
-
if self.consumers:
+ consumers = self.consumers
+ self.consumers = {}
+
+ for consumer in consumers.itervalues():
+ consumer.reconnect(self.session)
+ self._register_consumer(consumer)
+
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 4ffb1ae69..0daf07cf4 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -546,7 +546,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
- msg_id = str(uuid.uuid4().hex)
+ msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
diff --git a/nova/openstack/common/rpc/service.py b/nova/openstack/common/rpc/service.py
new file mode 100644
index 000000000..15508e432
--- /dev/null
+++ b/nova/openstack/common/rpc/service.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common.gettextutils import _
+from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
+from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
+from nova.openstack.common import service
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Service(service.Service):
+ """Service object for binaries running on hosts.
+
+ A service enables rpc by listening to queues based on topic and host."""
+ def __init__(self, host, topic, manager=None):
+ super(Service, self).__init__()
+ self.host = host
+ self.topic = topic
+ if manager is None:
+ self.manager = self
+ else:
+ self.manager = manager
+
+ def start(self):
+ super(Service, self).start()
+
+ self.conn = rpc.create_connection(new=True)
+ LOG.debug(_("Creating Consumer connection for Service %s") %
+ self.topic)
+
+ dispatcher = rpc_dispatcher.RpcDispatcher([self.manager])
+
+ # Share this same connection for these Consumers
+ self.conn.create_consumer(self.topic, dispatcher, fanout=False)
+
+ node_topic = '%s.%s' % (self.topic, self.host)
+ self.conn.create_consumer(node_topic, dispatcher, fanout=False)
+
+ self.conn.create_consumer(self.topic, dispatcher, fanout=True)
+
+ # Consume from all consumers in a thread
+ self.conn.consume_in_thread()
+
+ def stop(self):
+ # Try to shut the connection down, but if we get any sort of
+ # errors, go ahead and ignore them.. as we're shutting down anyway
+ try:
+ self.conn.close()
+ except Exception:
+ pass
+ super(Service, self).stop()
diff --git a/nova/openstack/common/setup.py b/nova/openstack/common/setup.py
index 4e2a57717..e6f72f034 100644
--- a/nova/openstack/common/setup.py
+++ b/nova/openstack/common/setup.py
@@ -117,8 +117,12 @@ def write_requirements():
def _run_shell_command(cmd):
- output = subprocess.Popen(["/bin/sh", "-c", cmd],
- stdout=subprocess.PIPE)
+ if os.name == 'nt':
+ output = subprocess.Popen(["cmd.exe", "/C", cmd],
+ stdout=subprocess.PIPE)
+ else:
+ output = subprocess.Popen(["/bin/sh", "-c", cmd],
+ stdout=subprocess.PIPE)
out = output.communicate()
if len(out) == 0:
return None
@@ -136,15 +140,17 @@ def _get_git_next_version_suffix(branch_name):
_run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
milestone_cmd = "git show meta/openstack/release:%s" % branch_name
milestonever = _run_shell_command(milestone_cmd)
- if not milestonever:
- milestonever = ""
+ if milestonever:
+ first_half = "%s~%s" % (milestonever, datestamp)
+ else:
+ first_half = datestamp
+
post_version = _get_git_post_version()
# post version should look like:
# 0.1.1.4.gcc9e28a
# where the bit after the last . is the short sha, and the bit between
# the last and second to last is the revno count
(revno, sha) = post_version.split(".")[-2:]
- first_half = "%s~%s" % (milestonever, datestamp)
second_half = "%s%s.%s" % (revno_prefix, revno, sha)
return ".".join((first_half, second_half))
diff --git a/nova/openstack/common/uuidutils.py b/nova/openstack/common/uuidutils.py
index 51042a798..7608acb94 100644
--- a/nova/openstack/common/uuidutils.py
+++ b/nova/openstack/common/uuidutils.py
@@ -22,6 +22,10 @@ UUID related utilities and helper functions.
import uuid
+def generate_uuid():
+ return str(uuid.uuid4())
+
+
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
diff --git a/nova/policy.py b/nova/policy.py
index 9506635e9..dc4bc7031 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -21,7 +21,6 @@ import os.path
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import policy
from nova import utils
diff --git a/nova/quota.py b/nova/quota.py
index e4c6f1d49..730c466d9 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -16,14 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Quotas for instances, volumes, and floating ips."""
+"""Quotas for instances, and floating ips."""
import datetime
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -42,13 +41,7 @@ quota_opts = [
cfg.IntOpt('quota_ram',
default=50 * 1024,
help='megabytes of instance ram allowed per project'),
- cfg.IntOpt('quota_volumes',
- default=10,
- help='number of volumes allowed per project'),
- cfg.IntOpt('quota_gigabytes',
- default=1000,
- help='number of volume gigabytes allowed per project'),
- cfg.IntOpt('quota_floating_ips',
+ cfg.IntOpt('quota_floating_ips',
default=10,
help='number of floating ips allowed per project'),
cfg.IntOpt('quota_metadata_items',
@@ -814,12 +807,6 @@ def _sync_instances(context, project_id, session):
context, project_id, session=session)))
-def _sync_volumes(context, project_id, session):
- return dict(zip(('volumes', 'gigabytes'),
- db.volume_data_get_for_project(
- context, project_id, session=session)))
-
-
def _sync_floating_ips(context, project_id, session):
return dict(floating_ips=db.floating_ip_count_by_project(
context, project_id, session=session))
@@ -837,8 +824,6 @@ resources = [
ReservableResource('instances', _sync_instances, 'quota_instances'),
ReservableResource('cores', _sync_instances, 'quota_cores'),
ReservableResource('ram', _sync_instances, 'quota_ram'),
- ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
- ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
ReservableResource('floating_ips', _sync_floating_ips,
'quota_floating_ips'),
AbsoluteResource('metadata_items', 'quota_metadata_items'),
diff --git a/nova/scheduler/baremetal_host_manager.py b/nova/scheduler/baremetal_host_manager.py
new file mode 100644
index 000000000..fdf482de7
--- /dev/null
+++ b/nova/scheduler/baremetal_host_manager.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Manage hosts in the current zone.
+"""
+
+from nova.scheduler import host_manager
+
+
+class BaremetalNodeState(host_manager.HostState):
+ """Mutable and immutable information tracked for a host.
+ This is an attempt to remove the ad-hoc data structures
+ previously used and lock down access.
+ """
+
+ def update_from_compute_node(self, compute):
+ """Update information about a host from its compute_node info."""
+ all_ram_mb = compute['memory_mb']
+
+ free_disk_mb = compute['free_disk_gb'] * 1024
+ free_ram_mb = compute['free_ram_mb']
+
+ self.free_ram_mb = free_ram_mb
+ self.total_usable_ram_mb = all_ram_mb
+ self.free_disk_mb = free_disk_mb
+ self.vcpus_total = compute['vcpus']
+ self.vcpus_used = compute['vcpus_used']
+
+ def consume_from_instance(self, instance):
+ self.free_ram_mb = 0
+ self.free_disk_mb = 0
+ self.vcpus_used = self.vcpus_total
+
+
+def new_host_state(self, host, node, capabilities=None, service=None):
+ """Returns an instance of BaremetalHostState or HostState according to
+ capabilities. If 'baremetal_driver' is in capabilities, it returns an
+ instance of BaremetalHostState. If not, returns an instance of HostState.
+ """
+ if capabilities is None:
+ capabilities = {}
+ cap = capabilities.get('compute', {})
+ if bool(cap.get('baremetal_driver')):
+ return BaremetalNodeState(host, node, capabilities, service)
+ else:
+ return host_manager.HostState(host, node, capabilities, service)
+
+
+class BaremetalHostManager(host_manager.HostManager):
+ """Bare-Metal HostManager class."""
+
+ # Override.
+ # Yes, this is not a class, and it is OK
+ host_state_cls = new_host_state
+
+ def __init__(self):
+ super(BaremetalHostManager, self).__init__()
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 1a608da29..86e21929c 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -25,7 +25,6 @@ import random
from nova import config
from nova import exception
-from nova import flags
from nova.scheduler import driver
CONF = config.CONF
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f93268906..39ea55527 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,7 +31,6 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
@@ -99,6 +98,15 @@ def instance_update_db(context, instance_uuid):
return db.instance_update(context, instance_uuid, values)
+def db_instance_node_set(context, instance_uuid, node):
+ '''Set the node field of an Instance.
+
+ :returns: An Instance with the updated fields set properly.
+ '''
+ values = {'node': node}
+ return db.instance_update(context, instance_uuid, values)
+
+
def cast_to_compute_host(context, host, method, **kwargs):
"""Cast request to a compute host queue"""
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index c43e48876..636818e59 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -19,16 +19,11 @@ You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
-import operator
-
from nova import config
from nova import exception
-from nova import flags
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.scheduler import driver
-from nova.scheduler import least_cost
from nova.scheduler import scheduler_options
CONF = config.CONF
@@ -39,7 +34,7 @@ class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
- self.cost_function_cache = {}
+ self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
def schedule_run_instance(self, context, request_spec,
@@ -61,9 +56,8 @@ class FilterScheduler(driver.Scheduler):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.start', notifier.INFO, payload)
- weighted_hosts = self._schedule(context, CONF.compute_topic,
- request_spec, filter_properties,
- instance_uuids)
+ weighed_hosts = self._schedule(context, request_spec,
+ filter_properties, instance_uuids)
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@@ -74,11 +68,11 @@ class FilterScheduler(driver.Scheduler):
try:
try:
- weighted_host = weighted_hosts.pop(0)
+ weighed_host = weighed_hosts.pop(0)
except IndexError:
raise exception.NoValidHost(reason="")
- self._provision_resource(context, weighted_host,
+ self._provision_resource(context, weighed_host,
request_spec,
filter_properties,
requested_networks,
@@ -108,41 +102,45 @@ class FilterScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- hosts = self._schedule(context, CONF.compute_topic, request_spec,
- filter_properties, [instance['uuid']])
- if not hosts:
+ weighed_hosts = self._schedule(context, request_spec,
+ filter_properties, [instance['uuid']])
+ if not weighed_hosts:
raise exception.NoValidHost(reason="")
- host = hosts.pop(0)
+ weighed_host = weighed_hosts.pop(0)
self._post_select_populate_filter_properties(filter_properties,
- host.host_state)
+ weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
# Forward off to the host
self.compute_rpcapi.prep_resize(context, image, instance,
- instance_type, host.host_state.host, reservations,
+ instance_type, weighed_host.obj.host, reservations,
request_spec=request_spec, filter_properties=filter_properties)
- def _provision_resource(self, context, weighted_host, request_spec,
+ def _provision_resource(self, context, weighed_host, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance_uuid=None):
"""Create the requested resource in this Zone."""
payload = dict(request_spec=request_spec,
- weighted_host=weighted_host.to_dict(),
+ weighted_host=weighed_host.to_dict(),
instance_id=instance_uuid)
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
- updated_instance = driver.instance_update_db(context, instance_uuid)
+ # TODO(NTTdocomo): Combine the next two updates into one
+ driver.db_instance_node_set(context,
+ instance_uuid, weighed_host.obj.nodename)
+ updated_instance = driver.instance_update_db(context,
+ instance_uuid)
self._post_select_populate_filter_properties(filter_properties,
- weighted_host.host_state)
+ weighed_host.obj)
self.compute_rpcapi.run_instance(context, instance=updated_instance,
- host=weighted_host.host_state.host,
+ host=weighed_host.obj.host,
request_spec=request_spec, filter_properties=filter_properties,
requested_networks=requested_networks,
injected_files=injected_files,
@@ -220,20 +218,15 @@ class FilterScheduler(driver.Scheduler):
"instance %(instance_uuid)s") % locals()
raise exception.NoValidHost(reason=msg)
- def _schedule(self, context, topic, request_spec, filter_properties,
+ def _schedule(self, context, request_spec, filter_properties,
instance_uuids=None):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
- if topic != CONF.compute_topic:
- msg = _("Scheduler only understands Compute nodes (for now)")
- raise NotImplementedError(msg)
-
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
- cost_functions = self.get_cost_functions()
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
@@ -257,14 +250,10 @@ class FilterScheduler(driver.Scheduler):
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
- # unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
- unfiltered_hosts_dict = self.host_manager.get_all_host_states(
- elevated, topic)
-
# Note: remember, we are using an iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
- hosts = unfiltered_hosts_dict.itervalues()
+ hosts = self.host_manager.get_all_host_states(elevated)
selected_hosts = []
if instance_uuids:
@@ -273,7 +262,7 @@ class FilterScheduler(driver.Scheduler):
num_instances = request_spec.get('num_instances', 1)
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
- hosts = self.host_manager.filter_hosts(hosts,
+ hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
# Can't get any more locally.
@@ -281,63 +270,12 @@ class FilterScheduler(driver.Scheduler):
LOG.debug(_("Filtered %(hosts)s") % locals())
- # weighted_host = WeightedHost() ... the best
- # host for the job.
- # TODO(comstud): filter_properties will also be used for
- # weighing and I plan fold weighing into the host manager
- # in a future patch. I'll address the naming of this
- # variable at that time.
- weighted_host = least_cost.weighted_sum(cost_functions,
- hosts, filter_properties)
- LOG.debug(_("Weighted %(weighted_host)s") % locals())
- selected_hosts.append(weighted_host)
-
+ weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
+ filter_properties)
+ best_host = weighed_hosts[0]
+ LOG.debug(_("Choosing host %(best_host)s") % locals())
+ selected_hosts.append(best_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
- weighted_host.host_state.consume_from_instance(
- instance_properties)
-
- selected_hosts.sort(key=operator.attrgetter('weight'))
+ best_host.obj.consume_from_instance(instance_properties)
return selected_hosts
-
- def get_cost_functions(self, topic=None):
- """Returns a list of tuples containing weights and cost functions to
- use for weighing hosts
- """
- if topic is None:
- # Schedulers only support compute right now.
- topic = CONF.compute_topic
- if topic in self.cost_function_cache:
- return self.cost_function_cache[topic]
-
- cost_fns = []
- for cost_fn_str in CONF.least_cost_functions:
- if '.' in cost_fn_str:
- short_name = cost_fn_str.split('.')[-1]
- else:
- short_name = cost_fn_str
- cost_fn_str = "%s.%s.%s" % (
- __name__, self.__class__.__name__, short_name)
- if not (short_name.startswith('%s_' % topic) or
- short_name.startswith('noop')):
- continue
-
- try:
- # NOTE: import_class is somewhat misnamed since
- # the weighing function can be any non-class callable
- # (i.e., no 'self')
- cost_fn = importutils.import_class(cost_fn_str)
- except ImportError:
- raise exception.SchedulerCostFunctionNotFound(
- cost_fn_str=cost_fn_str)
-
- try:
- flag_name = "%s_weight" % cost_fn.__name__
- weight = getattr(CONF, flag_name)
- except AttributeError:
- raise exception.SchedulerWeightFlagNotFound(
- flag_name=flag_name)
- cost_fns.append((weight, cost_fn))
-
- self.cost_function_cache[topic] = cost_fns
- return cost_fns
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 2056f968e..6e8e7ea7b 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -17,71 +17,41 @@
Scheduler host filters
"""
-import os
-import types
+from nova import filters
+from nova.openstack.common import log as logging
-from nova import exception
-from nova.openstack.common import importutils
+LOG = logging.getLogger(__name__)
-class BaseHostFilter(object):
+class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
+ def _filter_one(self, obj, filter_properties):
+ """Return True if the object passes the filter, otherwise False."""
+ return self.host_passes(obj, filter_properties)
def host_passes(self, host_state, filter_properties):
+ """Return True if the HostState passes the filter, otherwise False.
+ Override this in a subclass.
+ """
raise NotImplementedError()
- def _full_name(self):
- """module.classname of the filter."""
- return "%s.%s" % (self.__module__, self.__class__.__name__)
+class HostFilterHandler(filters.BaseFilterHandler):
+ def __init__(self):
+ super(HostFilterHandler, self).__init__(BaseHostFilter)
-def _is_filter_class(cls):
- """Return whether a class is a valid Host Filter class."""
- return type(cls) is types.TypeType and issubclass(cls, BaseHostFilter)
+def all_filters():
+ """Return a list of filter classes found in this directory.
-def _get_filter_classes_from_module(module_name):
- """Get all filter classes from a module."""
- classes = []
- module = importutils.import_module(module_name)
- for obj_name in dir(module):
- itm = getattr(module, obj_name)
- if _is_filter_class(itm):
- classes.append(itm)
- return classes
+ This method is used as the default for available scheduler filters
+ and should return a list of all filter classes available.
+ """
+ return HostFilterHandler().get_all_classes()
def standard_filters():
- """Return a list of filter classes found in this directory."""
- classes = []
- filters_dir = __path__[0]
- for dirpath, dirnames, filenames in os.walk(filters_dir):
- relpath = os.path.relpath(dirpath, filters_dir)
- if relpath == '.':
- relpkg = ''
- else:
- relpkg = '.%s' % '.'.join(relpath.split(os.sep))
- for fname in filenames:
- root, ext = os.path.splitext(fname)
- if ext != '.py' or root == '__init__':
- continue
- module_name = "%s%s.%s" % (__package__, relpkg, root)
- mod_classes = _get_filter_classes_from_module(module_name)
- classes.extend(mod_classes)
- return classes
-
-
-def get_filter_classes(filter_class_names):
- """Get filter classes from class names."""
- classes = []
- for cls_name in filter_class_names:
- obj = importutils.import_class(cls_name)
- if _is_filter_class(obj):
- classes.append(obj)
- elif type(obj) is types.FunctionType:
- # Get list of classes from a function
- classes.extend(obj())
- else:
- raise exception.ClassNotFound(class_name=cls_name,
- exception='Not a valid scheduler filter')
- return classes
+ """Deprecated. Configs should change to use all_filters()."""
+ LOG.deprecated(_("Use 'nova.scheduler.filters.all_filters' instead "
+ "of 'nova.scheduler.filters.standard_filters'"))
+ return all_filters()
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 202f8232a..93ec7d16e 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils
@@ -29,9 +28,6 @@ class ComputeFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Returns True for only active compute nodes"""
- instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
- return True
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 9c93df930..aec05ecc0 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -16,7 +16,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
@@ -38,7 +37,7 @@ class CoreFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient CPU cores."""
instance_type = filter_properties.get('instance_type')
- if host_state.topic != CONF.compute_topic or not instance_type:
+ if not instance_type:
return True
if not host_state.vcpus_total:
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index 358583b8a..77ee75c99 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/image_props_filter.py b/nova/scheduler/filters/image_props_filter.py
index f9ef693b3..a545ce9ff 100644
--- a/nova/scheduler/filters/image_props_filter.py
+++ b/nova/scheduler/filters/image_props_filter.py
@@ -26,7 +26,7 @@ class ImagePropertiesFilter(filters.BaseHostFilter):
"""Filter compute nodes that satisfy instance image properties.
The ImagePropertiesFilter filters compute nodes that satisfy
- any architecture, hpervisor type, or virtual machine mode properties
+ any architecture, hypervisor type, or virtual machine mode properties
specified on the instance's image properties. Image properties are
contained in the image dictionary in the request_spec.
"""
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index 1b40bae62..3f04815a0 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index d1d16b80b..213953b18 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.scheduler import filters
CONF = config.CONF
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index 17c7ebc22..b34e71526 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -14,7 +14,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index 85370dc2c..832a7c41a 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -15,7 +15,6 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 4fd0488d9..1e0470b61 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -49,7 +49,6 @@ import socket
import ssl
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 91e16ad34..ba4fa3d34 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -24,15 +24,15 @@ from nova.compute import vm_states
from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
+from nova.scheduler import weights
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
- default=['nova.scheduler.filters.standard_filters'],
+ default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.standard_filters" '
@@ -48,6 +48,9 @@ host_manager_opts = [
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
+ cfg.ListOpt('scheduler_weight_classes',
+ default=['nova.scheduler.weights.all_weighers'],
+ help='Which weight class names to use for weighing hosts'),
]
CONF = config.CONF
@@ -94,10 +97,10 @@ class HostState(object):
previously used and lock down access.
"""
- def __init__(self, host, topic, capabilities=None, service=None):
+ def __init__(self, host, node, capabilities=None, service=None):
self.host = host
- self.topic = topic
- self.update_capabilities(topic, capabilities, service)
+ self.nodename = node
+ self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
@@ -126,12 +129,12 @@ class HostState(object):
self.updated = None
- def update_capabilities(self, topic, capabilities=None, service=None):
+ def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
- self.capabilities = ReadOnlyDict(capabilities.get(topic, None))
+ self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
@@ -240,35 +243,9 @@ class HostState(object):
def _statmap(self, stats):
return dict((st['key'], st['value']) for st in stats)
- def passes_filters(self, filter_fns, filter_properties):
- """Return whether or not this host passes filters."""
-
- if self.host in filter_properties.get('ignore_hosts', []):
- LOG.debug(_('Host filter fails for ignored host %(host)s'),
- {'host': self.host})
- return False
-
- force_hosts = filter_properties.get('force_hosts', [])
- if force_hosts:
- if not self.host in force_hosts:
- LOG.debug(_('Host filter fails for non-forced host %(host)s'),
- {'host': self.host})
- return self.host in force_hosts
-
- for filter_fn in filter_fns:
- if not filter_fn(self, filter_properties):
- LOG.debug(_('Host filter function %(func)s failed for '
- '%(host)s'),
- {'func': repr(filter_fn),
- 'host': self.host})
- return False
-
- LOG.debug(_('Host filter passes for %(host)s'), {'host': self.host})
- return True
-
def __repr__(self):
- return ("%s ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
- (self.host, self.free_ram_mb, self.free_disk_mb,
+ return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s vm_type:%s" %
+ (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances, self.allowed_vm_type))
@@ -279,34 +256,34 @@ class HostManager(object):
host_state_cls = HostState
def __init__(self):
- self.service_states = {} # { <host> : { <service> : { cap k : v }}}
+ # { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
+ self.service_states = {}
self.host_state_map = {}
- self.filter_classes = filters.get_filter_classes(
+ self.filter_handler = filters.HostFilterHandler()
+ self.filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ CONF.scheduler_weight_classes)
- def _choose_host_filters(self, filters):
+ def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
- if filters is None:
- filters = CONF.scheduler_default_filters
- if not isinstance(filters, (list, tuple)):
- filters = [filters]
+ if filter_cls_names is None:
+ filter_cls_names = CONF.scheduler_default_filters
+ if not isinstance(filter_cls_names, (list, tuple)):
+ filter_cls_names = [filter_cls_names]
good_filters = []
bad_filters = []
- for filter_name in filters:
+ for filter_name in filter_cls_names:
found_class = False
for cls in self.filter_classes:
if cls.__name__ == filter_name:
+ good_filters.append(cls)
found_class = True
- filter_instance = cls()
- # Get the filter function
- filter_func = getattr(filter_instance,
- 'host_passes', None)
- if filter_func:
- good_filters.append(filter_func)
break
if not found_class:
bad_filters.append(filter_name)
@@ -315,42 +292,63 @@ class HostManager(object):
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
- def filter_hosts(self, hosts, filter_properties, filters=None):
+ def get_filtered_hosts(self, hosts, filter_properties,
+ filter_class_names=None):
"""Filter hosts and return only ones passing all filters"""
- filtered_hosts = []
- filter_fns = self._choose_host_filters(filters)
- for host in hosts:
- if host.passes_filters(filter_fns, filter_properties):
- filtered_hosts.append(host)
- return filtered_hosts
+ filter_classes = self._choose_host_filters(filter_class_names)
+
+ hosts = set(hosts)
+ ignore_hosts = set(filter_properties.get('ignore_hosts', []))
+ ignore_hosts = hosts & ignore_hosts
+ if ignore_hosts:
+ ignored_hosts = ', '.join(ignore_hosts)
+ msg = _('Host filter ignoring hosts: %(ignored_hosts)s')
+ LOG.debug(msg, locals())
+ hosts = hosts - ignore_hosts
+
+ force_hosts = set(filter_properties.get('force_hosts', []))
+ if force_hosts:
+ matching_force_hosts = hosts & force_hosts
+ if not matching_force_hosts:
+ forced_hosts = ', '.join(force_hosts)
+ msg = _("No hosts matched due to not matching 'force_hosts' "
+ "value of '%(forced_hosts)s'")
+ LOG.debug(msg, locals())
+ return []
+ forced_hosts = ', '.join(matching_force_hosts)
+ msg = _('Host filter forcing available hosts to %(forced_hosts)s')
+ LOG.debug(msg, locals())
+ hosts = matching_force_hosts
+
+ return self.filter_handler.get_filtered_objects(filter_classes,
+ hosts, filter_properties)
+
+ def get_weighed_hosts(self, hosts, weight_properties):
+ """Weigh the hosts"""
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
+
+ if service_name != 'compute':
+ LOG.debug(_('Ignoring %(service_name)s service update '
+ 'from %(host)s'), locals())
+ return
+
+ state_key = (host, capabilities.get('hypervisor_hostname'))
LOG.debug(_("Received %(service_name)s service update from "
- "%(host)s.") % locals())
- service_caps = self.service_states.get(host, {})
+ "%(state_key)s.") % locals())
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
- service_caps[service_name] = capab_copy
- self.service_states[host] = service_caps
-
- def get_all_host_states(self, context, topic):
- """Returns a dict of all the hosts the HostManager
- knows about. Also, each of the consumable resources in HostState
- are pre-populated and adjusted based on data in the db.
+ self.service_states[state_key] = capab_copy
- For example:
- {'192.168.1.100': HostState(), ...}
-
- Note: this can be very slow with a lot of instances.
- InstanceType table isn't required since a copy is stored
- with the instance (in case the InstanceType changed since the
- instance was created)."""
-
- if topic != CONF.compute_topic:
- raise NotImplementedError(_(
- "host_manager only implemented for 'compute'"))
+ def get_all_host_states(self, context):
+ """Returns a list of HostStates that represents all the hosts
+ the HostManager knows about. Also, each of the consumable resources
+ in HostState are pre-populated and adjusted based on data in the db.
+ """
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
@@ -360,16 +358,18 @@ class HostManager(object):
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
- capabilities = self.service_states.get(host, None)
- host_state = self.host_state_map.get(host)
+ node = compute.get('hypervisor_hostname')
+ state_key = (host, node)
+ capabilities = self.service_states.get(state_key, None)
+ host_state = self.host_state_map.get(state_key)
if host_state:
- host_state.update_capabilities(topic, capabilities,
+ host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
- host_state = self.host_state_cls(host, topic,
+ host_state = self.host_state_cls(host, node,
capabilities=capabilities,
service=dict(service.iteritems()))
- self.host_state_map[host] = host_state
+ self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
- return self.host_state_map
+ return self.host_state_map.itervalues()
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
deleted file mode 100644
index d3eaee735..000000000
--- a/nova/scheduler/least_cost.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Least Cost is an algorithm for choosing which host machines to
-provision a set of resources to. The input is a WeightedHost object which
-is decided upon by a set of objective-functions, called the 'cost-functions'.
-The WeightedHost contains a combined weight for each cost-function.
-
-The cost-function and weights are tabulated, and the host with the least cost
-is then selected for provisioning.
-"""
-
-from nova import config
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-least_cost_opts = [
- cfg.ListOpt('least_cost_functions',
- default=[
- 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
- ],
- help='Which cost functions the LeastCostScheduler should use'),
- cfg.FloatOpt('noop_cost_fn_weight',
- default=1.0,
- help='How much weight to give the noop cost function'),
- cfg.FloatOpt('compute_fill_first_cost_fn_weight',
- default=-1.0,
- help='How much weight to give the fill-first cost function. '
- 'A negative value will reverse behavior: '
- 'e.g. spread-first'),
- ]
-
-CONF = config.CONF
-CONF.register_opts(least_cost_opts)
-
-# TODO(sirp): Once we have enough of these rules, we can break them out into a
-# cost_functions.py file (perhaps in a least_cost_scheduler directory)
-
-
-class WeightedHost(object):
- """Reduced set of information about a host that has been weighed.
- This is an attempt to remove some of the ad-hoc dict structures
- previously used."""
-
- def __init__(self, weight, host_state=None):
- self.weight = weight
- self.host_state = host_state
-
- def to_dict(self):
- x = dict(weight=self.weight)
- if self.host_state:
- x['host'] = self.host_state.host
- return x
-
- def __repr__(self):
- if self.host_state:
- return "WeightedHost host: %s" % self.host_state.host
- return "WeightedHost with no host_state"
-
-
-def noop_cost_fn(host_state, weighing_properties):
- """Return a pre-weight cost of 1 for each host"""
- return 1
-
-
-def compute_fill_first_cost_fn(host_state, weighing_properties):
- """More free ram = higher weight. So servers with less free
- ram will be preferred.
-
- Note: the weight for this function in default configuration
- is -1.0. With a -1.0 this function runs in reverse, so systems
- with the most free memory will be preferred.
- """
- return host_state.free_ram_mb
-
-
-def weighted_sum(weighted_fns, host_states, weighing_properties):
- """Use the weighted-sum method to compute a score for an array of objects.
-
- Normalize the results of the objective-functions so that the weights are
- meaningful regardless of objective-function's range.
-
- :param host_list: ``[(host, HostInfo()), ...]``
- :param weighted_fns: list of weights and functions like::
-
- [(weight, objective-functions), ...]
-
- :param weighing_properties: an arbitrary dict of values that can
- influence weights.
-
- :returns: a single WeightedHost object which represents the best
- candidate.
- """
-
- min_score, best_host = None, None
- for host_state in host_states:
- score = sum(weight * fn(host_state, weighing_properties)
- for weight, fn in weighted_fns)
- if min_score is None or score < min_score:
- min_score, best_host = score, host_state
-
- return WeightedHost(min_score, host_state=best_host)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 77f5a0259..3ef2992fb 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -30,7 +30,6 @@ from nova import config
import nova.context
from nova import db
from nova import exception
-from nova import flags
from nova import manager
from nova import notifications
from nova.openstack.common import cfg
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index 7c68bb12a..988881632 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -28,7 +28,6 @@ https://bugs.launchpad.net/nova/+bug/1009681
"""
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.scheduler import driver
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index cbb6712db..b0c83d695 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -19,7 +19,6 @@ Client side of the scheduler manager RPC API.
"""
from nova import config
-from nova import flags
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index e8be0070b..5c253bc4f 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -27,7 +27,6 @@ import json
import os
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/scheduler/weights/__init__.py b/nova/scheduler/weights/__init__.py
new file mode 100644
index 000000000..55c44b528
--- /dev/null
+++ b/nova/scheduler/weights/__init__.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler host weights
+"""
+
+
+from nova import config
+from nova.openstack.common import log as logging
+from nova.scheduler.weights import least_cost
+from nova import weights
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class WeighedHost(weights.WeighedObject):
+ def to_dict(self):
+ x = dict(weight=self.weight)
+ x['host'] = self.obj.host
+ return x
+
+ def __repr__(self):
+ return "WeighedHost [host: %s, weight: %s]" % (
+ self.obj.host, self.weight)
+
+
+class BaseHostWeigher(weights.BaseWeigher):
+ """Base class for host weights."""
+ pass
+
+
+class HostWeightHandler(weights.BaseWeightHandler):
+ object_class = WeighedHost
+
+ def __init__(self):
+ super(HostWeightHandler, self).__init__(BaseHostWeigher)
+
+
+def all_weighers():
+ """Return a list of weight plugin classes found in this directory."""
+
+ if (CONF.least_cost_functions is not None or
+ CONF.compute_fill_first_cost_fn_weight is not None):
+ LOG.deprecated(_('least_cost has been deprecated in favor of '
+ 'the RAM Weigher.'))
+ return least_cost.get_least_cost_weighers()
+ return HostWeightHandler().get_all_classes()
diff --git a/nova/scheduler/weights/least_cost.py b/nova/scheduler/weights/least_cost.py
new file mode 100644
index 000000000..2d886f461
--- /dev/null
+++ b/nova/scheduler/weights/least_cost.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Least Cost is an algorithm for choosing which host machines to
+provision a set of resources to. The output is a WeightedHost object which
+is decided upon by a set of objective-functions, called the 'cost-functions'.
+The WeightedHost contains a combined weight for each cost-function.
+
+The cost-function and weights are tabulated, and the host with the least cost
+is then selected for provisioning.
+
+NOTE(comstud): This is deprecated. One should use the RAMWeigher and/or
+create other weight modules.
+"""
+
+from nova import config
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+least_cost_opts = [
+ cfg.ListOpt('least_cost_functions',
+ default=None,
+ help='Which cost functions the LeastCostScheduler should use'),
+ cfg.FloatOpt('noop_cost_fn_weight',
+ default=1.0,
+ help='How much weight to give the noop cost function'),
+ cfg.FloatOpt('compute_fill_first_cost_fn_weight',
+ default=None,
+ help='How much weight to give the fill-first cost function. '
+ 'A negative value will reverse behavior: '
+ 'e.g. spread-first'),
+ ]
+
+CONF = config.CONF
+CONF.register_opts(least_cost_opts)
+
+
+def noop_cost_fn(host_state, weight_properties):
+ """Return a pre-weight cost of 1 for each host"""
+ return 1
+
+
+def compute_fill_first_cost_fn(host_state, weight_properties):
+ """Higher weights win, so we should return a lower weight
+ when there's more free ram available.
+
+ Note: the weight modifier for this function in default configuration
+ is -1.0. With -1.0 this function runs in reverse, so systems
+ with the most free memory will be preferred.
+ """
+ return -host_state.free_ram_mb
+
+
+def _get_cost_functions():
+ """Returns a list of tuples containing weights and cost functions to
+ use for weighing hosts
+ """
+ cost_fns_conf = CONF.least_cost_functions
+ if cost_fns_conf is None:
+ # The old default. This will get fixed up below.
+ fn_str = 'nova.scheduler.least_cost.compute_fill_first_cost_fn'
+ cost_fns_conf = [fn_str]
+ cost_fns = []
+ for cost_fn_str in cost_fns_conf:
+ short_name = cost_fn_str.split('.')[-1]
+ if not (short_name.startswith('compute_') or
+ short_name.startswith('noop')):
+ continue
+ # Fix up any old paths to the new paths
+ if cost_fn_str.startswith('nova.scheduler.least_cost.'):
+ cost_fn_str = ('nova.scheduler.weights.least_cost' +
+ cost_fn_str[25:])
+ try:
+ # NOTE: import_class is somewhat misnamed since
+ # the weighing function can be any non-class callable
+ # (i.e., no 'self')
+ cost_fn = importutils.import_class(cost_fn_str)
+ except ImportError:
+ raise exception.SchedulerCostFunctionNotFound(
+ cost_fn_str=cost_fn_str)
+
+ try:
+ flag_name = "%s_weight" % cost_fn.__name__
+ weight = getattr(CONF, flag_name)
+ except AttributeError:
+ raise exception.SchedulerWeightFlagNotFound(
+ flag_name=flag_name)
+ # Set the original default.
+ if (flag_name == 'compute_fill_first_cost_fn_weight' and
+ weight is None):
+ weight = -1.0
+ cost_fns.append((weight, cost_fn))
+ return cost_fns
+
+
+def get_least_cost_weighers():
+ cost_functions = _get_cost_functions()
+
+ # Unfortunately we need to import this late so we don't have an
+ # import loop.
+ from nova.scheduler import weights
+
+ class _LeastCostWeigher(weights.BaseHostWeigher):
+ def weigh_objects(self, weighted_hosts, weight_properties):
+ for host in weighted_hosts:
+ host.weight = sum(weight * fn(host.obj, weight_properties)
+ for weight, fn in cost_functions)
+
+ return [_LeastCostWeigher]
diff --git a/nova/scheduler/weights/ram.py b/nova/scheduler/weights/ram.py
new file mode 100644
index 000000000..0fe1911c4
--- /dev/null
+++ b/nova/scheduler/weights/ram.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+RAM Weigher. Weigh hosts by their RAM usage.
+
+The default is to spread instances across all hosts evenly. If you prefer
+stacking, you can set the 'ram_weight_multiplier' option to a negative
+number and the weighing has the opposite effect of the default.
+"""
+
+from nova import config
+from nova.openstack.common import cfg
+from nova.scheduler import weights
+
+
+ram_weight_opts = [
+ cfg.FloatOpt('ram_weight_multiplier',
+ default=1.0,
+ help='Multiplier used for weighing ram. Negative '
+ 'numbers mean to stack vs spread.'),
+]
+
+CONF = config.CONF
+CONF.register_opts(ram_weight_opts)
+
+
+class RAMWeigher(weights.BaseHostWeigher):
+ def _weight_multiplier(self):
+ """Override the weight multiplier."""
+ return CONF.ram_weight_multiplier
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want spreading to be the default."""
+ return host_state.free_ram_mb
diff --git a/nova/service.py b/nova/service.py
index 45bdf7ec0..109fbc06c 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -30,13 +30,12 @@ import time
import eventlet
import greenlet
-from nova.common import eventlet_backdoor
from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
+from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -111,7 +110,7 @@ class Launcher(object):
"""
self._services = []
- eventlet_backdoor.initialize_if_enabled()
+ self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
@staticmethod
def run_server(server):
@@ -131,6 +130,8 @@ class Launcher(object):
:returns: None
"""
+ if self.backdoor_port is not None:
+ server.backdoor_port = self.backdoor_port
gt = eventlet.spawn(self.run_server, server)
self._services.append(gt)
@@ -382,6 +383,7 @@ class Service(object):
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
+ self.backdoor_port = None
def start(self):
vcs_string = version.version_string_with_vcs()
@@ -400,6 +402,9 @@ class Service(object):
self.manager.pre_start_hook()
+ if self.backdoor_port is not None:
+ self.manager.backdoor_port = self.backdoor_port
+
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
@@ -578,6 +583,7 @@ class WSGIService(object):
port=self.port)
# Pull back actual port used
self.port = self.server.port
+ self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
@@ -612,6 +618,8 @@ class WSGIService(object):
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
+ if self.backdoor_port is not None:
+ self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
diff --git a/nova/test.py b/nova/test.py
index cd82d74e2..25572600f 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -23,17 +23,15 @@ inline callbacks.
"""
-import functools
import sys
import uuid
+from fixtures import EnvironmentVariable
import mox
-import nose.plugins.skip
import stubout
import testtools
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -51,8 +49,8 @@ test_opts = [
help='should we use everything for testing'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(test_opts)
+CONF = config.CONF
+CONF.register_opts(test_opts)
LOG = logging.getLogger(__name__)
@@ -68,7 +66,7 @@ class TestCase(testtools.TestCase):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
- fake_flags.set_defaults(FLAGS)
+ fake_flags.set_defaults(CONF)
config.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
@@ -84,6 +82,7 @@ class TestCase(testtools.TestCase):
self.injected = []
self._services = []
self._modules = {}
+ self.useFixture(EnvironmentVariable('http_proxy'))
def tearDown(self):
"""Runs after each test method to tear down test environment."""
@@ -95,7 +94,7 @@ class TestCase(testtools.TestCase):
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
- FLAGS.reset()
+ CONF.reset()
# Unstub modules
for name, mod in self._modules.iteritems():
@@ -132,8 +131,9 @@ class TestCase(testtools.TestCase):
def flags(self, **kw):
"""Override flag variables for a test."""
+ group = kw.pop('group', None)
for k, v in kw.iteritems():
- FLAGS.set_override(k, v)
+ CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
@@ -143,86 +143,3 @@ class TestCase(testtools.TestCase):
svc.start()
self._services.append(svc)
return svc
-
- # Useful assertions
- def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
- """Assert two dicts are equivalent.
-
- This is a 'deep' match in the sense that it handles nested
- dictionaries appropriately.
-
- NOTE:
-
- If you don't care (or don't know) a given value, you can specify
- the string DONTCARE as the value. This will cause that dict-item
- to be skipped.
-
- """
- def raise_assertion(msg):
- d1str = str(d1)
- d2str = str(d2)
- base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
- 'd2: %(d2str)s' % locals())
- raise AssertionError(base_msg)
-
- d1keys = set(d1.keys())
- d2keys = set(d2.keys())
- if d1keys != d2keys:
- d1only = d1keys - d2keys
- d2only = d2keys - d1keys
- raise_assertion('Keys in d1 and not d2: %(d1only)s. '
- 'Keys in d2 and not d1: %(d2only)s' % locals())
-
- for key in d1keys:
- d1value = d1[key]
- d2value = d2[key]
- try:
- error = abs(float(d1value) - float(d2value))
- within_tolerance = error <= tolerance
- except (ValueError, TypeError):
- # If both values aren't convertible to float, just ignore
- # ValueError if arg is a str, TypeError if it's something else
- # (like None)
- within_tolerance = False
-
- if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
- self.assertDictMatch(d1value, d2value)
- elif 'DONTCARE' in (d1value, d2value):
- continue
- elif approx_equal and within_tolerance:
- continue
- elif d1value != d2value:
- raise_assertion("d1['%(key)s']=%(d1value)s != "
- "d2['%(key)s']=%(d2value)s" % locals())
-
- def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
- """Assert a list of dicts are equivalent."""
- def raise_assertion(msg):
- L1str = str(L1)
- L2str = str(L2)
- base_msg = ('List of dictionaries do not match: %(msg)s '
- 'L1: %(L1str)s L2: %(L2str)s' % locals())
- raise AssertionError(base_msg)
-
- L1count = len(L1)
- L2count = len(L2)
- if L1count != L2count:
- raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
- 'len(L2)=%(L2count)d' % locals())
-
- for d1, d2 in zip(L1, L2):
- self.assertDictMatch(d1, d2, approx_equal=approx_equal,
- tolerance=tolerance)
-
- def assertSubDictMatch(self, sub_dict, super_dict):
- """Assert a sub_dict is subset of super_dict."""
- self.assertEqual(True,
- set(sub_dict.keys()).issubset(set(super_dict.keys())))
- for k, sub_value in sub_dict.items():
- super_value = super_dict[k]
- if isinstance(sub_value, dict):
- self.assertSubDictMatch(sub_value, super_value)
- elif 'DONTCARE' in (sub_value, super_value):
- continue
- else:
- self.assertEqual(sub_value, super_value)
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index ee2d17afa..f0c117b32 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -31,8 +31,8 @@ setattr(__builtin__, '_', lambda x: x)
import os
import shutil
+from nova import config
from nova.db.sqlalchemy.session import get_engine
-from nova import flags
from nova.openstack.common import log as logging
import eventlet
@@ -40,8 +40,8 @@ import eventlet
eventlet.monkey_patch(os=False)
-FLAGS = flags.FLAGS
-FLAGS.use_stderr = False
+CONF = config.CONF
+CONF.set_override('use_stderr', False)
logging.setup('nova')
@@ -49,7 +49,7 @@ _DB = None
def reset_db():
- if FLAGS.sql_connection == "sqlite://":
+ if CONF.sql_connection == "sqlite://":
engine = get_engine()
engine.dispose()
conn = engine.connect()
@@ -58,8 +58,8 @@ def reset_db():
else:
setup()
else:
- shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
- os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
+ shutil.copyfile(os.path.join(CONF.state_path, CONF.sqlite_clean_db),
+ os.path.join(CONF.state_path, CONF.sqlite_db))
def setup():
@@ -70,41 +70,41 @@ def setup():
from nova.db import migration
from nova.network import manager as network_manager
from nova.tests import fake_flags
- fake_flags.set_defaults(FLAGS)
+ fake_flags.set_defaults(CONF)
- if FLAGS.sql_connection == "sqlite://":
+ if CONF.sql_connection == "sqlite://":
if migration.db_version() > migration.INIT_VERSION:
return
else:
- testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
+ testdb = os.path.join(CONF.state_path, CONF.sqlite_db)
if os.path.exists(testdb):
return
migration.db_sync()
ctxt = context.get_admin_context()
network = network_manager.VlanManager()
- bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
+ bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
- cidr=FLAGS.fixed_range,
- multi_host=FLAGS.multi_host,
- num_networks=FLAGS.num_networks,
- network_size=FLAGS.network_size,
- cidr_v6=FLAGS.fixed_range_v6,
- gateway=FLAGS.gateway,
- gateway_v6=FLAGS.gateway_v6,
- bridge=FLAGS.flat_network_bridge,
+ cidr=CONF.fixed_range,
+ multi_host=CONF.multi_host,
+ num_networks=CONF.num_networks,
+ network_size=CONF.network_size,
+ cidr_v6=CONF.fixed_range_v6,
+ gateway=CONF.gateway,
+ gateway_v6=CONF.gateway_v6,
+ bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
- vpn_start=FLAGS.vpn_start,
- vlan_start=FLAGS.vlan_start,
- dns1=FLAGS.flat_network_dns)
+ vpn_start=CONF.vpn_start,
+ vlan_start=CONF.vlan_start,
+ dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
- if FLAGS.sql_connection == "sqlite://":
+ if CONF.sql_connection == "sqlite://":
global _DB
engine = get_engine()
conn = engine.connect()
_DB = "".join(line for line in conn.connection.iterdump())
else:
- cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
+ cleandb = os.path.join(CONF.state_path, CONF.sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 13e854077..cbdfc3aee 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -24,20 +24,20 @@ from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import utils as compute_utils
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+from nova.tests import matchers
from nova import volume
-
+CONF = config.CONF
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def get_fake_cache():
@@ -59,7 +59,7 @@ def get_fake_cache():
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
@@ -434,18 +434,18 @@ class CinderCloudTestCase(test.TestCase):
result = {}
self.cloud._format_instance_bdm(self.context, inst1['uuid'],
'/dev/sdb1', result)
- self.assertSubDictMatch(
+ self.assertThat(
{'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
- result)
+ matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = {}
self.cloud._format_instance_bdm(self.context, inst2['uuid'],
'/dev/sdc1', result)
- self.assertSubDictMatch(
+ self.assertThat(
{'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
- result)
+ matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
@@ -465,7 +465,7 @@ class CinderCloudTestCase(test.TestCase):
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
- self.assertSubDictMatch(x, y)
+ self.assertThat(x, matchers.IsSubDictOf(y))
found = True
break
self.assertTrue(found)
@@ -477,24 +477,19 @@ class CinderCloudTestCase(test.TestCase):
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = self._assertInstance(inst1['id'])
- self.assertSubDictMatch(self._expected_instance_bdm1, result)
+ self.assertThat(
+ self._expected_instance_bdm1,
+ matchers.IsSubDictOf(result))
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = self._assertInstance(inst2['id'])
- self.assertSubDictMatch(self._expected_instance_bdm2, result)
+ self.assertThat(
+ self._expected_instance_bdm2,
+ matchers.IsSubDictOf(result))
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
- def assertDictListUnorderedMatch(self, L1, L2, key):
- self.assertEqual(len(L1), len(L2))
- for d1 in L1:
- self.assertTrue(key in d1)
- for d2 in L2:
- self.assertTrue(key in d2)
- if d1[key] == d2[key]:
- self.assertDictMatch(d1, d2)
-
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
@@ -668,7 +663,7 @@ class CinderCloudTestCase(test.TestCase):
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
@@ -678,8 +673,8 @@ class CinderCloudTestCase(test.TestCase):
'delete_on_termination': True},
]}
ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
@@ -750,15 +745,14 @@ class CinderCloudTestCase(test.TestCase):
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
- instance_id = ec2utils.ec2_id_to_id(ec2_instance_id)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
@@ -769,7 +763,7 @@ class CinderCloudTestCase(test.TestCase):
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_detached(vol)
- instance = db.instance_get(self.context, instance_id)
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.cloud.compute_api.attach_volume(self.context,
instance,
volume_id=vol2_uuid,
@@ -832,7 +826,7 @@ class CinderCloudTestCase(test.TestCase):
snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snap1_uuid,
@@ -841,8 +835,8 @@ class CinderCloudTestCase(test.TestCase):
'snapshot_id': snap2_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_instance_id_to_uuid(self.context,
- ec2_instance_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
@@ -892,7 +886,7 @@ class CinderCloudTestCase(test.TestCase):
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 95003ee87..bdf2cbe7c 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -32,10 +32,10 @@ from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.image import s3
from nova.network import api as network_api
from nova.openstack.common import log as logging
@@ -43,13 +43,13 @@ from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+from nova.tests import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
-
+CONF = config.CONF
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def get_fake_cache():
@@ -71,7 +71,7 @@ def get_fake_cache():
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
@@ -94,10 +94,8 @@ def get_instances_with_cached_ips(orig_func, *args, **kwargs):
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volume_api_class='nova.tests.fake_volume.API',
- volumes_dir=vol_tmpdir)
+ volume_api_class='nova.tests.fake_volume.API')
def fake_show(meh, context, id):
return {'id': id,
@@ -259,7 +257,7 @@ class CloudTestCase(test.TestCase):
project_id=project_id)
fixed_ips = nw_info.fixed_ips()
- ec2_id = ec2utils.id_to_ec2_inst_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
lambda *args: {'fixed_ips': ['10.0.0.1'],
@@ -381,7 +379,7 @@ class CloudTestCase(test.TestCase):
def test_security_group_quota_limit(self):
self.flags(quota_security_groups=10)
- for i in range(1, FLAGS.quota_security_groups + 1):
+ for i in range(1, CONF.quota_security_groups + 1):
name = 'test name %i' % i
descript = 'test description %i' % i
create = self.cloud.create_security_group
@@ -963,7 +961,7 @@ class CloudTestCase(test.TestCase):
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
- self.assertDictMatch(d1, d2)
+ self.assertThat(d1, matchers.DictMatches(d2))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
@@ -1283,17 +1281,17 @@ class CloudTestCase(test.TestCase):
'imageType': 'machine',
'description': None}
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
image['properties']['image_location'] = None
expected['imageLocation'] = 'None (name)'
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
image['name'] = None
image['properties']['image_location'] = 'location'
expected['imageLocation'] = 'location'
expected['name'] = 'location'
result = self.cloud._format_image(image)
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
deregister_image = self.cloud.deregister_image
@@ -1333,7 +1331,7 @@ class CloudTestCase(test.TestCase):
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
- instance_type=FLAGS.default_instance_type,
+ instance_type=CONF.default_instance_type,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
@@ -1446,7 +1444,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1478,7 +1476,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_availability_zone(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1,
'placement': {'availability_zone': 'fake'},
}
@@ -1514,7 +1512,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_image_state_none(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1533,7 +1531,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances_image_state_invalid(self):
kwargs = {'image_id': 'ami-00000001',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1553,8 +1551,8 @@ class CloudTestCase(test.TestCase):
self.context, **kwargs)
def test_run_instances_image_status_active(self):
- kwargs = {'image_id': FLAGS.default_image,
- 'instance_type': FLAGS.default_instance_type,
+ kwargs = {'image_id': CONF.default_image,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
run_instances = self.cloud.run_instances
@@ -1593,7 +1591,7 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1622,7 +1620,7 @@ class CloudTestCase(test.TestCase):
def test_start_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1644,7 +1642,7 @@ class CloudTestCase(test.TestCase):
def test_stop_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1663,7 +1661,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1684,7 +1682,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_invalid_instance_id(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1695,7 +1693,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1728,7 +1726,7 @@ class CloudTestCase(test.TestCase):
def test_terminate_instances_two_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
@@ -1753,7 +1751,7 @@ class CloudTestCase(test.TestCase):
def test_reboot_instances(self):
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
@@ -1799,7 +1797,7 @@ class CloudTestCase(test.TestCase):
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
@@ -1907,7 +1905,7 @@ class CloudTestCase(test.TestCase):
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
- 'instance_type': FLAGS.default_instance_type,
+ 'instance_type': CONF.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
@@ -2015,13 +2013,12 @@ class CloudTestCase(test.TestCase):
}
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
- def fake_volume_get(ctxt, volume_id, session=None):
- if volume_id == 87654321:
- return {'id': volume_id,
- 'attach_time': '13:56:24',
- 'status': 'in-use'}
- raise exception.VolumeNotFound(volume_id=volume_id)
- self.stubs.Set(db, 'volume_get', fake_volume_get)
+ def fake_get_instance_uuid_by_ec2_id(ctxt, int_id):
+ if int_id == 305419896:
+ return 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
+ raise exception.InstanceNotFound(instance_id=int_id)
+ self.stubs.Set(db, 'get_instance_uuid_by_ec2_id',
+ fake_get_instance_uuid_by_ec2_id)
get_attribute = functools.partial(
self.cloud.describe_instance_attribute,
@@ -2075,7 +2072,7 @@ class CloudTestCase(test.TestCase):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior"""
- kwargs.update({'instance_type': FLAGS.default_instance_type,
+ kwargs.update({'instance_type': CONF.default_instance_type,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index c9c11d547..0f15c66b8 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -21,10 +21,10 @@ import datetime
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import utils as compute_utils
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
@@ -32,8 +32,8 @@ from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
+CONF = config.CONF
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class EC2ValidateTestCase(test.TestCase):
diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/api/ec2/test_middleware.py
index a618817bd..25cba1c4d 100644
--- a/nova/tests/api/ec2/test_middleware.py
+++ b/nova/tests/api/ec2/test_middleware.py
@@ -22,13 +22,13 @@ import webob.dec
import webob.exc
from nova.api import ec2
+from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
-FLAGS = flags.FLAGS
+CONF = config.CONF
@webob.dec.wsgify
@@ -62,28 +62,28 @@ class LockoutTestCase(test.TestCase):
return (req.get_response(self.lockout).status_int == 403)
def test_lockout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
def test_timeout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test'))
- timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test'))
def test_multiple_keys(self):
- self._send_bad_attempts('test1', FLAGS.lockout_attempts)
+ self._send_bad_attempts('test1', CONF.lockout_attempts)
self.assertTrue(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
- timeutils.advance_time_seconds(FLAGS.lockout_minutes * 60)
+ timeutils.advance_time_seconds(CONF.lockout_minutes * 60)
self.assertFalse(self._is_locked_out('test1'))
self.assertFalse(self._is_locked_out('test2'))
def test_window_timeout(self):
- self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
- timeutils.advance_time_seconds(FLAGS.lockout_window * 60)
- self._send_bad_attempts('test', FLAGS.lockout_attempts - 1)
+ timeutils.advance_time_seconds(CONF.lockout_window * 60)
+ self._send_bad_attempts('test', CONF.lockout_attempts - 1)
self.assertFalse(self._is_locked_out('test'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index c412b2c5c..9ede7dd17 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -13,6 +13,7 @@
# under the License.
import datetime
+import uuid
import webob
@@ -20,17 +21,16 @@ from nova.api.openstack import compute
from nova.api.openstack.compute.contrib import admin_actions
from nova.compute import api as compute_api
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
INSTANCE = {
"id": 1,
@@ -88,7 +88,7 @@ class AdminActionsTest(test.TestCase):
def setUp(self):
super(AdminActionsTest, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
- self.UUID = utils.gen_uuid()
+ self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
@@ -181,7 +181,7 @@ class CreateBackupTests(test.TestCase):
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.backup_stubs = fakes.stub_out_compute_api_backup(self.stubs)
self.app = compute.APIRouter(init_only=('servers',))
- self.uuid = utils.gen_uuid()
+ self.uuid = uuid.uuid4()
def _get_request(self, body):
url = '/fake/servers/%s/action' % self.uuid
@@ -216,7 +216,7 @@ class CreateBackupTests(test.TestCase):
'metadata': {'123': 'asdf'},
},
}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
body['createBackup']['metadata']['foo%i' % num] = "bar"
request = self._get_request(body)
@@ -307,7 +307,7 @@ class ResetStateTests(test.TestCase):
self.exists = True
self.kwargs = None
- self.uuid = utils.gen_uuid()
+ self.uuid = uuid.uuid4()
def fake_get(inst, context, instance_id):
if self.exists:
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index 4fa68bd7d..a209fdce8 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -22,6 +22,7 @@ from nova import context
from nova import exception
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import matchers
LOG = logging.getLogger(__name__)
AGGREGATE_LIST = [
@@ -319,7 +320,8 @@ class AggregateTestCase(test.TestCase):
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
- self.assertDictMatch(body["set_metadata"]['metadata'], values)
+ self.assertThat(body["set_metadata"]['metadata'],
+ matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 97b78f81e..348816833 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -18,21 +18,21 @@ from lxml import etree
from nova.api.openstack.compute.contrib import cloudpipe
from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
+from nova import config
from nova import db
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
+from nova.tests import matchers
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
def fake_vpn_instance():
return {
- 'id': 7, 'image_ref': FLAGS.vpn_image_id, 'vm_state': 'active',
+ 'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': 7777, 'project_id': 'other',
}
@@ -108,7 +108,7 @@ class CloudpipeTest(test.TestCase):
'state': 'running',
'instance_id': 7777,
'created_at': '1981-10-20T00:00:00Z'}]}
- self.assertDictMatch(res_dict, response)
+ self.assertThat(res_dict, matchers.DictMatches(response))
def test_cloudpipe_create(self):
def launch_vpn_instance(context):
diff --git a/nova/tests/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
index 74af62e60..07f7f6eec 100644
--- a/nova/tests/api/openstack/compute/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
@@ -21,16 +21,13 @@ from xml.dom import minidom
import webob
from nova.compute import api as compute_api
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
FAKE_UUID = fakes.FAKE_UUID
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 9206899d6..c79fda910 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -18,8 +18,8 @@
import datetime
from nova.api.openstack import compute
+from nova import config
import nova.db.api
-from nova import flags
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc
from nova import test
@@ -31,8 +31,6 @@ MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
stub_instance = fakes.stub_instance
-FLAGS = flags.FLAGS
-
API_DISK_CONFIG = 'OS-DCF:diskConfig'
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
index 31e40d7ca..e9c0c0bc8 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
@@ -18,17 +18,13 @@ import webob
from nova.api.openstack.compute.contrib import extended_server_attributes
from nova import compute
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
index 3ca4000e5..457b8bd32 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_status.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
@@ -18,16 +18,12 @@ import webob
from nova.api.openstack.compute.contrib import extended_status
from nova import compute
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
diff --git a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
new file mode 100644
index 000000000..4d5ca5e5c
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
@@ -0,0 +1,164 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import fixed_ips
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ ]
+
+
+def fake_fixed_ip_get_by_address(context, address):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, FakeModel(network), None)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+
+def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+
+class FakeModel(object):
+ """Stubs out for model."""
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return '<FakeModel: %s>' % self.values
+
+
+def fake_network_get_all(context):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ return [FakeModel(network)]
+
+
+class FixedIpTest(test.TestCase):
+
+ def setUp(self):
+ super(FixedIpTest, self).setUp()
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ self.context = context.get_admin_context()
+ self.controller = fixed_ips.FixedIPController()
+
+ def tearDown(self):
+ super(FixedIpTest, self).tearDown()
+
+ def test_fixed_ips_get(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/192.168.1.1')
+ res_dict = self.controller.show(req, '192.168.1.1')
+ response = {'fixed_ip': {'cidr': '192.168.1.0/24',
+ 'hostname': None,
+ 'host': None,
+ 'address': '192.168.1.1'}}
+ self.assertEqual(response, res_dict)
+
+ def test_fixed_ips_get_fail(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
+ '10.0.0.1')
+
+ def test_fixed_ip_reserve(self):
+ fake_fixed_ips[0]['reserved'] = False
+ ip_addr = '192.168.1.1'
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/192.168.1.1/action')
+ result = self.controller.action(req, "192.168.1.1", body)
+
+ self.assertEqual('202 Accepted', result.status)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], True)
+
+ def test_fixed_ip_reserve_bad_ip(self):
+ ip_addr = '10.0.0.1'
+ body = {'reserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.1/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.1', body)
+
+ def test_fixed_ip_unreserve(self):
+ fake_fixed_ips[0]['reserved'] = True
+ ip_addr = '192.168.1.1'
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/192.168.1.1/action')
+ result = self.controller.action(req, "192.168.1.1", body)
+
+ self.assertEqual('202 Accepted', result.status)
+ self.assertEqual(fake_fixed_ips[0]['reserved'], False)
+
+ def test_fixed_ip_unreserve_bad_ip(self):
+ ip_addr = '10.0.0.1'
+ body = {'unreserve': None}
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-fixed-ips/10.0.0.1/action')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req,
+ '10.0.0.1', body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
index 1225b56b9..b2400bc51 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
@@ -17,15 +17,11 @@ import webob
from nova.api.openstack.compute.contrib import flavor_disabled
from nova.compute import instance_types
-from nova import flags
+from nova import config
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
index 52163c700..a6624e88d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
@@ -16,15 +16,11 @@ from lxml import etree
import webob
from nova.compute import instance_types
-from nova import flags
+from nova import config
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
index 75e9cd76b..e8c66962a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
@@ -16,15 +16,11 @@ from lxml import etree
import webob
from nova.compute import instance_types
-from nova import flags
+from nova import config
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
-
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index 171b0900e..d67682a4f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
from lxml import etree
import webob
@@ -29,7 +31,7 @@ from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
-from nova import utils
+
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
@@ -88,7 +90,7 @@ def network_api_disassociate(self, context, instance, floating_address):
def fake_instance_get(context, instance_id):
return {
"id": 1,
- "uuid": utils.gen_uuid(),
+ "uuid": uuid.uuid4(),
"name": 'fake',
"user_id": 'fakeuser',
"project_id": '123'}
diff --git a/nova/tests/api/openstack/compute/contrib/test_fping.py b/nova/tests/api/openstack/compute/contrib/test_fping.py
new file mode 100644
index 000000000..9a838162a
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_fping.py
@@ -0,0 +1,94 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack import extensions
+from nova import exception
+from nova import test
+from nova.tests.api.openstack import fakes
+import nova.utils
+
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def execute(*cmd, **args):
+ return "".join(["%s is alive" % ip for ip in cmd[1:]])
+
+
+class FpingTest(test.TestCase):
+
+ def setUp(self):
+ super(FpingTest, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(nova.db, "instance_get_all_by_filters",
+ return_servers)
+ self.stubs.Set(nova.db, "instance_get_by_uuid",
+ return_server)
+ self.stubs.Set(nova.db, "instance_get_all_by_project",
+ return_servers)
+ self.stubs.Set(nova.utils, "execute",
+ execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ lambda self: None)
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = fping.FpingController(self.ext_mgr)
+
+ def test_fping_index(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ self.assertTrue("servers" in res_dict)
+ for srv in res_dict["servers"]:
+ for key in "project_id", "id", "alive":
+ self.assertTrue(key in srv)
+
+ def test_fping_index_policy(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
+ self.assertRaises(exception.NotAuthorized, self.controller.index, req)
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?all_tenants=1")
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ self.assertTrue("servers" in res_dict)
+
+ def test_fping_index_include(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?include=%s" % ids[0])
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_index_exclude(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping?exclude=%s" %
+ ",".join(ids[1:]))
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_show(self):
+ req = fakes.HTTPRequest.blank("/v2/1234/os-fping/%s" % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertTrue("server" in res_dict)
+ srv = res_dict["server"]
+ for key in "project_id", "id", "alive":
+ self.assertTrue(key in srv)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index b2b5407f5..f5bce3184 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -21,12 +21,9 @@ from nova.compute import power_state
from nova.compute import vm_states
from nova import context
from nova import db
-from nova import flags
from nova.openstack.common import log as logging
from nova import test
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
HOST_LIST = [
{"host_name": "host_c1", "service": "compute", "zone": "nova"},
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index 808493f1b..2540904ee 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -23,15 +23,12 @@ import uuid
import webob
from nova.api.openstack.compute.contrib import networks
+from nova import config
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
-from nova import flags
-
-
-FLAGS = flags.FLAGS
-
+CONF = config.CONF
FAKE_NETWORKS = [
{
@@ -137,12 +134,12 @@ class FakeNetworkAPI(object):
def create(self, context, **kwargs):
subnet_bits = int(math.ceil(math.log(kwargs.get(
- 'network_size', FLAGS.network_size), 2)))
+ 'network_size', CONF.network_size), 2)))
fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(
prefixlen_v4,
- count=kwargs.get('num_networks', FLAGS.num_networks)))
+ count=kwargs.get('num_networks', CONF.num_networks)))
new_networks = []
new_id = max((net['id'] for net in self.networks))
for index, subnet_v4 in enumerate(subnets_v4):
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
index b732f889c..a8d651977 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
@@ -24,11 +24,11 @@ from nova.tests.api.openstack import fakes
def quota_set(class_name):
return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
- 'volumes': 10, 'gigabytes': 1000, 'ram': 51200,
- 'floating_ips': 10, 'instances': 10, 'injected_files': 5,
- 'cores': 20, 'injected_file_content_bytes': 10240,
- 'security_groups': 10, 'security_group_rules': 20,
- 'key_pairs': 100, 'injected_file_path_bytes': 255}}
+ 'ram': 51200, 'floating_ips': 10, 'instances': 10,
+ 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240, 'security_groups': 10,
+ 'security_group_rules': 20, 'key_pairs': 100,
+ 'injected_file_path_bytes': 255}}
class QuotaClassSetsTest(test.TestCase):
@@ -42,10 +42,8 @@ class QuotaClassSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
- 'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
@@ -62,8 +60,6 @@ class QuotaClassSetsTest(test.TestCase):
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['volumes'], 10)
- self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
@@ -89,8 +85,7 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
@@ -108,8 +103,7 @@ class QuotaClassSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
@@ -135,8 +129,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
- volumes=30,
- gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
@@ -162,8 +154,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
exemplar = dict(quota_class_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
- volumes='30',
- gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
@@ -177,8 +167,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
- '<volumes>30</volumes>'
- '<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index f628535a7..47761b6a1 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -25,8 +25,8 @@ from nova.tests.api.openstack import fakes
def quota_set(id):
- return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
- 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
+ return {'quota_set': {'id': id, 'metadata_items': 128,
+ 'ram': 51200, 'floating_ips': 10,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': 10, 'security_group_rules': 20,
@@ -44,10 +44,8 @@ class QuotaSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
'floating_ips': 10,
'metadata_items': 128,
- 'gigabytes': 1000,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
@@ -63,8 +61,6 @@ class QuotaSetsTest(test.TestCase):
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['volumes'], 10)
- self.assertEqual(qs['gigabytes'], 1000)
self.assertEqual(qs['floating_ips'], 10)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
@@ -85,8 +81,6 @@ class QuotaSetsTest(test.TestCase):
'instances': 10,
'cores': 20,
'ram': 51200,
- 'volumes': 10,
- 'gigabytes': 1000,
'floating_ips': 10,
'metadata_items': 128,
'injected_files': 5,
@@ -113,8 +107,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_admin(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
@@ -130,8 +123,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_as_user(self):
body = {'quota_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'volumes': 10,
- 'gigabytes': 1000, 'floating_ips': 10,
+ 'ram': 51200, 'floating_ips': 10,
'metadata_items': 128, 'injected_files': 5,
'injected_file_content_bytes': 10240,
'security_groups': 10,
@@ -144,8 +136,7 @@ class QuotaSetsTest(test.TestCase):
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
- 'ram': -2, 'volumes': -2,
- 'gigabytes': -2, 'floating_ips': -2,
+ 'ram': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
@@ -167,8 +158,6 @@ class QuotaXMLSerializerTest(test.TestCase):
metadata_items=10,
injected_file_path_bytes=255,
injected_file_content_bytes=20,
- volumes=30,
- gigabytes=40,
ram=50,
floating_ips=60,
instances=70,
@@ -193,8 +182,6 @@ class QuotaXMLSerializerTest(test.TestCase):
exemplar = dict(quota_set=dict(
metadata_items='10',
injected_file_content_bytes='20',
- volumes='30',
- gigabytes='40',
ram='50',
floating_ips='60',
instances='70',
@@ -208,8 +195,6 @@ class QuotaXMLSerializerTest(test.TestCase):
'<metadata_items>10</metadata_items>'
'<injected_file_content_bytes>20'
'</injected_file_content_bytes>'
- '<volumes>30</volumes>'
- '<gigabytes>40</gigabytes>'
'<ram>50</ram>'
'<floating_ips>60</floating_ips>'
'<instances>70</instances>'
diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py
index 88e9141f7..2a7e973ff 100644
--- a/nova/tests/api/openstack/compute/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/compute/contrib/test_rescue.py
@@ -15,13 +15,13 @@
import webob
from nova import compute
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-FLAGS = flags.FLAGS
+CONF = config.CONF
def rescue(self, context, instance, rescue_password=None):
@@ -71,7 +71,7 @@ class RescueTest(test.TestCase):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_json = jsonutils.loads(resp.body)
- self.assertEqual(FLAGS.password_length, len(resp_json['adminPass']))
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
def test_rescue_of_rescued_instance(self):
body = dict(rescue=None)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index d33da9ca5..6fdeb0fec 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -25,19 +25,17 @@ from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
+from nova import config
import nova.db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
-
+CONF = config.CONF
FAKE_UUID = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
-FLAGS = flags.FLAGS
-
class AttrDict(dict):
def __getattr__(self, k):
@@ -259,7 +257,7 @@ class TestSecurityGroups(test.TestCase):
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- for num in range(1, FLAGS.quota_security_groups + 1):
+ for num in range(1, CONF.quota_security_groups + 1):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
@@ -1000,7 +998,7 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- for num in range(100, 100 + FLAGS.quota_security_group_rules):
+ for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
'to_port': num, 'parent_group_id': '2', 'group_id': '1'
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index e0d3cbb0f..fb912d0c4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -64,7 +64,3 @@ class ServerStartStopTest(test.TestCase):
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
new file mode 100644
index 000000000..24f169d98
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -0,0 +1,198 @@
+# Copyright 2012 IBM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from datetime import datetime
+from nova.api.openstack.compute.contrib import services
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import timeutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+fake_services_list = [{'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'availability_zone': 'nova',
+ 'id': 1,
+ 'disabled': True,
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'availability_zone': 'nova',
+ 'id': 2,
+ 'disabled': True,
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 27)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'availability_zone': 'nova',
+ 'id': 3,
+ 'disabled': False,
+ 'updated_at': datetime(2012, 9, 19, 6, 55, 34),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'availability_zone': 'nova',
+ 'id': 4,
+ 'disabled': True,
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38),
+ 'created_at': datetime(2012, 9, 18, 2, 46, 28)},
+ ]
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithSevice(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"service": "nova-compute"}
+
+
+class FakeRequestWithHost(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1"}
+
+
+class FakeRequestWithHostService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1", "service": "nova-compute"}
+
+
+def fake_servcie_get_all(context):
+ return fake_services_list
+
+
+def fake_service_get_by_host_binary(context, host, binary):
+ for service in fake_services_list:
+ if service['host'] == host and service['binary'] == binary:
+ return service
+ return None
+
+
+def fake_service_get_by_id(value):
+ for service in fake_services_list:
+ if service['id'] == value:
+ return service
+ return None
+
+
+def fake_service_update(context, service_id, values):
+ service = fake_service_get_by_id(service_id)
+ if service is None:
+ raise exception.ServiceNotFound(service_id=service_id)
+ else:
+ {'host': 'host1', 'service': 'nova-compute',
+ 'disabled': values['disabled']}
+
+
+def fake_utcnow():
+ return datetime(2012, 10, 29, 13, 42, 11)
+
+
+class ServicesTest(test.TestCase):
+
+ def setUp(self):
+ super(ServicesTest, self).setUp()
+
+ self.stubs.Set(db, "service_get_all", fake_servcie_get_all)
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(db, "service_get_by_args",
+ fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update", fake_service_update)
+
+ self.context = context.get_admin_context()
+ self.controller = services.ServiceController()
+
+ def tearDown(self):
+ super(ServicesTest, self).tearDown()
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-scheduler',
+ 'host': 'host1', 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1', 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler', 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'enabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute', 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_service(self):
+ req = FakeRequestWithSevice()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-compute', 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'down',
+ 'updated_at': datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ body = {'host': 'host1', 'service': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ res_dict = self.controller.update(req, "enable", body)
+
+ self.assertEqual(res_dict['disabled'], False)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ body = {'host': 'host1', 'service': 'nova-compute'}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['disabled'], True)
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index 0a4610aea..a10ec208c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -22,8 +22,8 @@ import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage
from nova.compute import api
+from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
@@ -31,9 +31,6 @@ from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
-
SERVERS = 5
TENANTS = 2
HOURS = 24
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
index 6e76fc04a..319991bfa 100644
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
@@ -17,9 +17,9 @@ from lxml import etree
import webob
from nova.api.openstack.compute.contrib import volumes
+from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -27,8 +27,6 @@ from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
-FLAGS = flags.FLAGS
-
LOG = logging.getLogger(__name__)
_last_param = {}
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 6c092cbd4..4272b3011 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -21,9 +21,9 @@ import webob
from nova.api.openstack.compute.contrib import volumes
from nova.compute import api as compute_api
from nova.compute import instance_types
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
@@ -31,8 +31,7 @@ from nova.tests.api.openstack import fakes
from nova.volume import cinder
from webob import exc
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
@@ -129,23 +128,18 @@ class BootFromVolumeTest(test.TestCase):
self.assertEqual(res.status_int, 202)
server = jsonutils.loads(res.body)['server']
self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(FLAGS.password_length, len(server['adminPass']))
+ self.assertEqual(CONF.password_length, len(server['adminPass']))
self.assertEqual(len(_block_device_mapping_seen), 1)
self.assertEqual(_block_device_mapping_seen[0]['volume_id'], 1)
self.assertEqual(_block_device_mapping_seen[0]['device_name'],
'/dev/vda')
-def return_volume(context, volume_id):
- return {'id': volume_id}
-
-
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(db, 'volume_get', return_volume)
self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
diff --git a/nova/tests/api/openstack/compute/test_consoles.py b/nova/tests/api/openstack/compute/test_consoles.py
index 6ea8149cf..6b8d290c4 100644
--- a/nova/tests/api/openstack/compute/test_consoles.py
+++ b/nova/tests/api/openstack/compute/test_consoles.py
@@ -17,23 +17,23 @@
# under the License.
import datetime
+import uuid as stdlib_uuid
from lxml import etree
import webob
from nova.api.openstack.compute import consoles
from nova.compute import vm_states
+from nova import config
from nova import console
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
@@ -58,7 +58,7 @@ class FakeInstanceDB(object):
if id is None:
id = self.max_id + 1
if uuid is None:
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
@@ -134,7 +134,7 @@ class ConsolesControllerTest(test.TestCase):
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
- self.uuid = str(utils.gen_uuid())
+ self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
@@ -167,7 +167,7 @@ class ConsolesControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
@@ -211,7 +211,7 @@ class ConsolesControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index 466fd3636..fe7f03cd6 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -25,13 +25,13 @@ from nova.api.openstack.compute import extensions as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import flags
+from nova import config
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import matchers
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -141,7 +141,7 @@ class StubExtensionManager(object):
class ExtensionTestCase(test.TestCase):
def setUp(self):
super(ExtensionTestCase, self).setUp()
- ext_list = FLAGS.osapi_compute_extension[:]
+ ext_list = CONF.osapi_compute_extension[:]
fox = ('nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
if fox not in ext_list:
@@ -166,6 +166,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"DiskConfig",
"ExtendedStatus",
"ExtendedServerAttributes",
+ "FixedIPs",
"FlavorAccess",
"FlavorDisabled",
"FlavorExtraSpecs",
@@ -189,6 +190,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"SecurityGroups",
"ServerDiagnostics",
"ServerStartStop",
+ "Services",
"SimpleTenantUsage",
"UsedLimits",
"UserData",
@@ -347,7 +349,7 @@ class ResourceExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_resource(self):
res_ext = base_extensions.ResourceExtension('tweedles',
@@ -365,7 +367,7 @@ class ResourceExtensionTest(ExtensionTestCase):
"code": 404
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
class InvalidExtension(object):
@@ -430,7 +432,7 @@ class ActionExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
def test_non_exist_action(self):
body = dict(blah=dict(name="test"))
@@ -451,7 +453,7 @@ class ActionExtensionTest(ExtensionTestCase):
"code": 400
}
}
- self.assertDictMatch(expected, body)
+ self.assertThat(expected, matchers.DictMatches(body))
class RequestExtensionTest(ExtensionTestCase):
@@ -666,3 +668,31 @@ class ExtensionsXMLSerializerTest(test.TestCase):
self.assertEqual(link_nodes[i].get(key), value)
xmlutil.validate_schema(root, 'extensions')
+
+
+class ExtensionControllerIdFormatTest(test.TestCase):
+
+ def _bounce_id(self, test_id):
+
+ class BounceController(object):
+ def show(self, req, id):
+ return id
+ res_ext = base_extensions.ResourceExtension('bounce',
+ BounceController())
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/bounce/%s" % test_id)
+ response = request.get_response(app)
+ return response.body
+
+ def test_id_with_xml_format(self):
+ result = self._bounce_id('foo.xml')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_json_format(self):
+ result = self._bounce_id('foo.json')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_bad_format(self):
+ result = self._bounce_id('foo.bad')
+ self.assertEqual(result, 'foo.bad')
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index 77d40df03..16bb74801 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -23,16 +23,13 @@ import urlparse
from nova.api.openstack.compute import flavors
from nova.api.openstack import xmlutil
import nova.compute.instance_types
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
-
-
-FLAGS = flags.FLAGS
-
+from nova.tests import matchers
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -219,7 +216,7 @@ class FlavorsTest(test.TestCase):
'rel': 'next'}
]
}
- self.assertDictMatch(flavor, expected)
+ self.assertThat(flavor, matchers.DictMatches(expected))
def test_get_flavor_detail_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors/detail?limit=1')
@@ -251,7 +248,8 @@ class FlavorsTest(test.TestCase):
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['1'], 'marker': ['1']}, params)
+ self.assertThat({'limit': ['1'], 'marker': ['1']},
+ matchers.DictMatches(params))
def test_get_flavor_with_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
@@ -297,7 +295,8 @@ class FlavorsTest(test.TestCase):
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/flavors', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['2'], 'marker': ['2']}, params)
+ self.assertThat({'limit': ['2'], 'marker': ['2']},
+ matchers.DictMatches(params))
def test_get_flavor_list_detail(self):
req = fakes.HTTPRequest.blank('/v2/fake/flavors/detail')
diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py
index 1cd46902b..a5ac184ca 100644
--- a/nova/tests/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/api/openstack/compute/test_image_metadata.py
@@ -18,13 +18,12 @@
import webob
from nova.api.openstack.compute import image_metadata
-from nova import flags
+from nova import config
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ImageMetaDataTest(test.TestCase):
@@ -134,7 +133,7 @@ class ImageMetaDataTest(test.TestCase):
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
req.method = 'PUT'
overload = {}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
overload['key%s' % num] = 'value%s' % num
body = {'meta': overload}
req.body = jsonutils.dumps(body)
@@ -176,7 +175,7 @@ class ImageMetaDataTest(test.TestCase):
def test_too_many_metadata_items_on_create(self):
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
req.method = 'POST'
diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py
index af1dee30b..1ce2c960e 100644
--- a/nova/tests/api/openstack/compute/test_images.py
+++ b/nova/tests/api/openstack/compute/test_images.py
@@ -28,16 +28,13 @@ import webob
from nova.api.openstack.compute import images
from nova.api.openstack.compute.views import images as images_view
from nova.api.openstack import xmlutil
+from nova import config
from nova import exception
-from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import matchers
from nova import utils
-
-FLAGS = flags.FLAGS
-
-
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
NOW_API_FORMAT = "2010-10-11T10:30:22Z"
@@ -112,7 +109,7 @@ class ImagesControllerTest(test.TestCase):
},
}
- self.assertDictMatch(expected_image, actual_image)
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
def test_get_image_with_custom_prefix(self):
self.flags(osapi_compute_link_prefix='https://zoo.com:42',
@@ -166,7 +163,7 @@ class ImagesControllerTest(test.TestCase):
}],
},
}
- self.assertDictMatch(expected_image, actual_image)
+ self.assertThat(actual_image, matchers.DictMatches(expected_image))
def test_get_image_404(self):
fake_req = fakes.HTTPRequest.blank('/v2/fake/images/unknown')
@@ -461,7 +458,7 @@ class ImagesControllerTest(test.TestCase):
},
]
- self.assertDictListMatch(expected, response_list)
+ self.assertThat(expected, matchers.DictListMatches(response_list))
def test_get_image_details_with_limit(self):
request = fakes.HTTPRequest.blank('/v2/fake/images/detail?limit=2')
@@ -537,13 +534,14 @@ class ImagesControllerTest(test.TestCase):
}],
}]
- self.assertDictListMatch(expected, response_list)
+ self.assertThat(expected, matchers.DictListMatches(response_list))
href_parts = urlparse.urlparse(response_links[0]['href'])
self.assertEqual('/v2/fake/images', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
- self.assertDictMatch({'limit': ['2'], 'marker': ['124']}, params)
+ self.assertThat({'limit': ['2'], 'marker': ['124']},
+ matchers.DictMatches(params))
def test_image_detail_filter_with_name(self):
image_service = self.mox.CreateMockAnything()
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index 84c000035..32e7ab9e0 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -30,6 +30,7 @@ from nova.api.openstack import xmlutil
import nova.context
from nova.openstack.common import jsonutils
from nova import test
+from nova.tests import matchers
TEST_LIMITS = [
@@ -120,8 +121,6 @@ class LimitsControllerTest(BaseLimitTestSuite):
'ram': 512,
'instances': 5,
'cores': 21,
- 'gigabytes': 512,
- 'volumes': 5,
'key_pairs': 10,
'floating_ips': 10,
'security_groups': 10,
@@ -170,8 +169,6 @@ class LimitsControllerTest(BaseLimitTestSuite):
"maxTotalRAMSize": 512,
"maxTotalInstances": 5,
"maxTotalCores": 21,
- "maxTotalVolumeGigabytes": 512,
- "maxTotalVolumes": 5,
"maxTotalKeypairs": 10,
"maxTotalFloatingIps": 10,
"maxSecurityGroups": 10,
@@ -866,7 +863,7 @@ class LimitsViewBuilderTest(test.TestCase):
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
- self.assertDictMatch(output, expected_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
def test_build_limits_empty_limits(self):
expected_limits = {"limits": {"rate": [],
@@ -875,7 +872,7 @@ class LimitsViewBuilderTest(test.TestCase):
abs_limits = {}
rate_limits = []
output = self.view_builder.build(rate_limits, abs_limits)
- self.assertDictMatch(output, expected_limits)
+ self.assertThat(output, matchers.DictMatches(expected_limits))
class LimitsXMLSerializationTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 0afa00f2b..002f51010 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+import uuid
import mox
import webob
@@ -22,18 +23,18 @@ from nova.api.openstack.compute import servers
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import db
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+CONF = config.CONF
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
@@ -176,7 +177,7 @@ class ServerActionsControllerTest(test.TestCase):
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
- req, str(utils.gen_uuid()), body)
+ req, str(uuid.uuid4()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
@@ -209,7 +210,7 @@ class ServerActionsControllerTest(test.TestCase):
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
- FLAGS.password_length)
+ CONF.password_length)
self.assertEqual(robj['location'], self_href)
@@ -825,7 +826,7 @@ class ServerActionsControllerTest(test.TestCase):
'metadata': {},
},
}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
@@ -1052,7 +1053,7 @@ class TestServerActionXMLDeserializer(test.TestCase):
],
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
@@ -1065,7 +1066,7 @@ class TestServerActionXMLDeserializer(test.TestCase):
"imageRef": "http://localhost/images/1",
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index bcce624d7..5f595e3a5 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -15,20 +15,21 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid
+
import webob
from nova.api.openstack.compute import server_metadata
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
import nova.db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-from nova import utils
-FLAGS = flags.FLAGS
+CONF = config.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
@@ -65,7 +66,7 @@ def stub_server_metadata():
def stub_max_server_metadata():
metadata = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items):
+ for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
@@ -108,7 +109,7 @@ class ServerMetaDataTest(test.TestCase):
fake_change_instance_metadata)
self.controller = server_metadata.Controller()
- self.uuid = str(utils.gen_uuid())
+ self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
def test_index(self):
@@ -416,7 +417,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
@@ -455,7 +456,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
@@ -469,7 +470,7 @@ class ServerMetaDataTest(test.TestCase):
self.stubs.Set(nova.db, 'instance_metadata_update',
return_create_instance_metadata)
data = {"metadata": {}}
- for num in range(FLAGS.quota_metadata_items + 1):
+ for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index afa181ee3..0b5f3cf67 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -19,6 +19,7 @@
import base64
import datetime
import urlparse
+import uuid
import iso8601
from lxml import etree
@@ -34,11 +35,11 @@ from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
-from nova import flags
from nova.network import manager
from nova.network.quantumv2 import api as quantum_api
from nova.openstack.common import jsonutils
@@ -47,10 +48,11 @@ from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
from nova.tests.image import fake
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+CONF = config.CONF
+
FAKE_UUID = fakes.FAKE_UUID
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -236,7 +238,7 @@ class ServersControllerTest(test.TestCase):
"""Create two servers with the same host and different
project_ids and check that the hostId's are unique"""
def return_instance_with_host(self, *args):
- project_id = str(utils.gen_uuid())
+ project_id = str(uuid.uuid4())
return fakes.stub_instance(id=1, uuid=FAKE_UUID,
project_id=project_id,
host='fake_host')
@@ -315,7 +317,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_active_status_by_id(self):
image_bookmark = "http://localhost/fake/images/10"
@@ -381,7 +383,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_with_id_image_ref_by_id(self):
image_ref = "10"
@@ -450,7 +452,7 @@ class ServersControllerTest(test.TestCase):
}
}
- self.assertDictMatch(res_dict, expected_server)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
def test_get_server_addresses_from_cache(self):
pub0 = ('172.19.0.1', '172.19.0.2',)
@@ -501,7 +503,7 @@ class ServersControllerTest(test.TestCase):
],
},
}
- self.assertDictMatch(res_dict, expected)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
def test_get_server_addresses_nonexistent_network(self):
url = '/v2/fake/servers/%s/ips/network_0' % FAKE_UUID
@@ -515,7 +517,7 @@ class ServersControllerTest(test.TestCase):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- server_id = str(utils.gen_uuid())
+ server_id = str(uuid.uuid4())
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/ips' % server_id)
self.assertRaises(webob.exc.HTTPNotFound,
self.ips_controller.index, req, server_id)
@@ -596,7 +598,7 @@ class ServersControllerTest(test.TestCase):
params = urlparse.parse_qs(href_parts.query)
expected_params = {'limit': ['3'],
'marker': [fakes.get_fake_uuid(2)]}
- self.assertDictMatch(expected_params, params)
+ self.assertThat(params, matchers.DictMatches(expected_params))
def test_get_servers_with_limit_bad_value(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers?limit=aaa')
@@ -618,7 +620,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
- self.assertDictMatch(expected, params)
+ self.assertThat(params, matchers.DictMatches(expected))
def test_get_server_details_with_limit_bad_value(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail?limit=aaa')
@@ -640,9 +642,9 @@ class ServersControllerTest(test.TestCase):
href_parts = urlparse.urlparse(servers_links[0]['href'])
self.assertEqual('/v2/fake/servers', href_parts.path)
params = urlparse.parse_qs(href_parts.query)
-
- self.assertDictMatch({'limit': ['3'], 'blah': ['2:t'],
- 'marker': [fakes.get_fake_uuid(2)]}, params)
+ expected = {'limit': ['3'], 'blah': ['2:t'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
def test_get_servers_with_too_big_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers?limit=30')
@@ -672,7 +674,7 @@ class ServersControllerTest(test.TestCase):
self.controller.index, req)
def test_get_servers_with_bad_option(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -688,7 +690,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_image(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -771,7 +773,7 @@ class ServersControllerTest(test.TestCase):
self.assertTrue('servers' in res)
def test_get_servers_allows_flavor(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -791,7 +793,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_status(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -823,7 +825,7 @@ class ServersControllerTest(test.TestCase):
self.controller.detail, req)
def test_get_servers_deleted_status_as_admin(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -843,7 +845,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_name(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -862,7 +864,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_allows_changes_since(self):
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -894,7 +896,7 @@ class ServersControllerTest(test.TestCase):
context is not admin. Make sure the admin and unknown options
are stripped before they get to compute_api.get_all()
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -923,7 +925,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by admin-only or unknown options when
context is admin. All options should be passed
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -952,7 +954,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by ip with admin_api enabled and
admin context
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -975,7 +977,7 @@ class ServersControllerTest(test.TestCase):
"""Test getting servers by ip6 with admin_api enabled and
admin context
"""
- server_uuid = str(utils.gen_uuid())
+ server_uuid = str(uuid.uuid4())
def fake_get_all(compute_self, context, search_opts=None,
sort_key=None, sort_dir='desc',
@@ -1670,7 +1672,7 @@ class ServersControllerCreateTest(test.TestCase):
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry"""
- if (topic == FLAGS.scheduler_topic and
+ if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
num_instances = request_spec.get('num_instances', 1)
@@ -1698,7 +1700,7 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
@@ -1720,7 +1722,7 @@ class ServersControllerCreateTest(test.TestCase):
length.
"""
- self.assertEqual(FLAGS.password_length,
+ self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_pass_missing(self, server_dict):
@@ -2784,7 +2786,6 @@ class ServersControllerCreateTest(test.TestCase):
'open': 'stack',
},
'personality': {},
- 'config_drive': True,
},
}
@@ -3253,7 +3254,7 @@ class TestServerCreateRequestXMLDeserializer(test.TestCase):
],
},
}
- self.assertDictMatch(request['body'], expected)
+ self.assertThat(request['body'], matchers.DictMatches(expected))
def test_spec_request(self):
image_bookmark_link = ("http://servers.api.openstack.org/1234/"
@@ -3698,7 +3699,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.basic(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_with_project_id(self):
expected_server = {
@@ -3720,7 +3721,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.basic(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail(self):
image_bookmark = "http://localhost/fake/images/5"
@@ -3779,7 +3780,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3853,7 +3854,7 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3871,7 +3872,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.RequestContext('fake', 'fake')
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3890,7 +3892,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.get_admin_context()
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_no_details_admin(self):
self.instance['vm_state'] = vm_states.ERROR
@@ -3908,7 +3911,8 @@ class ServersViewBuilderTest(test.TestCase):
self.request.context = context.get_admin_context()
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output['server']['fault'], expected_fault)
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
def test_build_server_detail_with_fault_but_active(self):
self.instance['vm_state'] = vm_states.ACTIVE
@@ -3989,7 +3993,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv4(self):
@@ -4051,7 +4055,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_accessipv6(self):
@@ -4113,7 +4117,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
def test_build_server_detail_with_metadata(self):
@@ -4177,7 +4181,7 @@ class ServersViewBuilderTest(test.TestCase):
}
output = self.view_builder.show(self.request, self.instance)
- self.assertDictMatch(output, expected_server)
+ self.assertThat(output, matchers.DictMatches(expected_server))
class ServerXMLSerializationTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py
index 4520faa48..16790860c 100644
--- a/nova/tests/api/openstack/compute/test_versions.py
+++ b/nova/tests/api/openstack/compute/test_versions.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import uuid as stdlib_uuid
+
import feedparser
from lxml import etree
import webob
@@ -26,7 +28,7 @@ from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
-from nova import utils
+from nova.tests import matchers
NS = {
@@ -346,7 +348,8 @@ class VersionsTest(test.TestCase):
},
], }
- self.assertDictMatch(expected, jsonutils.loads(res.body))
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
def test_multi_choice_image_xml(self):
req = webob.Request.blank('/images/1')
@@ -383,7 +386,7 @@ class VersionsTest(test.TestCase):
self.assertEqual(res.content_type, "application/json")
def test_multi_choice_server(self):
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
req = webob.Request.blank('/servers/' + uuid)
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app())
@@ -416,7 +419,8 @@ class VersionsTest(test.TestCase):
},
], }
- self.assertDictMatch(expected, jsonutils.loads(res.body))
+ self.assertThat(jsonutils.loads(res.body),
+ matchers.DictMatches(expected))
class VersionsViewBuilderTests(test.TestCase):
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 4f39e569e..d84b215b2 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -16,6 +16,7 @@
# under the License.
import datetime
+import uuid
import glanceclient.v1.images
import routes
@@ -34,6 +35,7 @@ from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import vm_states
+import nova.config
from nova import context
from nova.db.sqlalchemy import models
from nova import exception as exc
@@ -44,7 +46,6 @@ from nova.openstack.common import timeutils
from nova import quota
from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
-from nova import utils
from nova import wsgi
@@ -151,7 +152,7 @@ def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
- stubs.Set(nova.flags, '_get_my_ip', get_my_ip)
+ stubs.Set(nova.config, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
@@ -373,7 +374,7 @@ def create_info_cache(nw_cache):
def get_fake_uuid(token=0):
if not token in FAKE_UUIDS:
- FAKE_UUIDS[token] = str(utils.gen_uuid())
+ FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
diff --git a/nova/tests/api/test_sizelimit.py b/nova/tests/api/test_sizelimit.py
index 170198b6b..0eb46616f 100644
--- a/nova/tests/api/test_sizelimit.py
+++ b/nova/tests/api/test_sizelimit.py
@@ -15,11 +15,11 @@
import webob
import nova.api.sizelimit
-from nova import flags
+from nova import config
from nova import test
-FLAGS = flags.FLAGS
-MAX_REQUEST_BODY_SIZE = FLAGS.osapi_max_request_body_size
+CONF = config.CONF
+MAX_REQUEST_BODY_SIZE = CONF.osapi_max_request_body_size
class TestRequestBodySizeLimiter(test.TestCase):
diff --git a/nova/tests/baremetal/db/__init__.py b/nova/tests/baremetal/db/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/tests/baremetal/db/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/baremetal/db/base.py b/nova/tests/baremetal/db/base.py
new file mode 100644
index 000000000..83abcb58e
--- /dev/null
+++ b/nova/tests/baremetal/db/base.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Bare-metal DB test base class."""
+
+from nova import config
+from nova import context as nova_context
+from nova import test
+from nova.virt.baremetal.db import migration as bm_migration
+from nova.virt.baremetal.db.sqlalchemy import session as bm_session
+
+_DB = None
+
+CONF = config.CONF
+CONF.import_opt('baremetal_sql_connection',
+ 'nova.virt.baremetal.db.sqlalchemy.session')
+
+
+def _reset_bmdb():
+ global _DB
+ engine = bm_session.get_engine()
+ engine.dispose()
+ conn = engine.connect()
+ if _DB is None:
+ if bm_migration.db_version() > bm_migration.INIT_VERSION:
+ return
+ bm_migration.db_sync()
+ _DB = "".join(line for line in conn.connection.iterdump())
+ else:
+ conn.connection.executescript(_DB)
+
+
+class BMDBTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BMDBTestCase, self).setUp()
+ self.flags(baremetal_sql_connection='sqlite:///:memory:')
+ _reset_bmdb()
+ self.context = nova_context.get_admin_context()
diff --git a/nova/tests/baremetal/db/test_bm_interface.py b/nova/tests/baremetal/db/test_bm_interface.py
new file mode 100644
index 000000000..6aef437c1
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_interface.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-metal DB testcase for BareMetalInterface
+"""
+
+from nova import exception
+from nova.tests.baremetal.db import base
+from nova.virt.baremetal import db
+
+
+class BareMetalInterfaceTestCase(base.BMDBTestCase):
+
+ def test_unique_address(self):
+ pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
+ '0x1', 1)
+ self.assertRaises(exception.DBError,
+ db.bm_interface_create,
+ self.context, 2, '11:11:11:11:11:11', '0x2', 2)
+ # succeed after delete pif1
+ db.bm_interface_destroy(self.context, pif1_id)
+ pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
+ '0x2', 2)
+ self.assertTrue(pif2_id is not None)
+
+ def test_unique_vif_uuid(self):
+ pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
+ '0x1', 1)
+ pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
+ '0x2', 2)
+ db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
+ self.assertRaises(exception.DBError,
+ db.bm_interface_set_vif_uuid,
+ self.context, pif2_id, 'AAAA')
diff --git a/nova/tests/baremetal/db/test_bm_node.py b/nova/tests/baremetal/db/test_bm_node.py
new file mode 100644
index 000000000..062b209a6
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_node.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-Metal DB testcase for BareMetalNode
+"""
+
+from nova.tests.baremetal.db import base
+from nova.tests.baremetal.db import utils
+from nova.virt.baremetal import db
+
+
+class BareMetalNodesTestCase(base.BMDBTestCase):
+
+ def _create_nodes(self):
+ nodes = [
+ utils.new_bm_node(pm_address='0', service_host="host1",
+ memory_mb=100000, cpus=100, local_gb=10000),
+ utils.new_bm_node(pm_address='1', service_host="host2",
+ instance_uuid='A',
+ memory_mb=100000, cpus=100, local_gb=10000),
+ utils.new_bm_node(pm_address='2', service_host="host2",
+ memory_mb=1000, cpus=1, local_gb=1000),
+ utils.new_bm_node(pm_address='3', service_host="host2",
+ memory_mb=1000, cpus=2, local_gb=1000),
+ utils.new_bm_node(pm_address='4', service_host="host2",
+ memory_mb=2000, cpus=1, local_gb=1000),
+ utils.new_bm_node(pm_address='5', service_host="host2",
+ memory_mb=2000, cpus=2, local_gb=1000),
+ ]
+ self.ids = []
+ for n in nodes:
+ ref = db.bm_node_create(self.context, n)
+ self.ids.append(ref['id'])
+
+ def test_get_all0(self):
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(r, [])
+
+ def test_get_all(self):
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(r, [])
+
+ self._create_nodes()
+
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(len(r), 6)
+
+ def test_get(self):
+ self._create_nodes()
+
+ r = db.bm_node_get(self.context, self.ids[0])
+ self.assertEquals(r['pm_address'], '0')
+
+ r = db.bm_node_get(self.context, self.ids[1])
+ self.assertEquals(r['pm_address'], '1')
+
+ r = db.bm_node_get(self.context, -1)
+ self.assertTrue(r is None)
+
+ def test_get_by_service_host(self):
+ self._create_nodes()
+
+ r = db.bm_node_get_all(self.context, service_host=None)
+ self.assertEquals(len(r), 6)
+
+ r = db.bm_node_get_all(self.context, service_host="host1")
+ self.assertEquals(len(r), 1)
+ self.assertEquals(r[0]['pm_address'], '0')
+
+ r = db.bm_node_get_all(self.context, service_host="host2")
+ self.assertEquals(len(r), 5)
+ pmaddrs = [x['pm_address'] for x in r]
+ self.assertIn('1', pmaddrs)
+ self.assertIn('2', pmaddrs)
+ self.assertIn('3', pmaddrs)
+ self.assertIn('4', pmaddrs)
+ self.assertIn('5', pmaddrs)
+
+ r = db.bm_node_get_all(self.context, service_host="host3")
+ self.assertEquals(r, [])
+
+ def test_destroy(self):
+ self._create_nodes()
+
+ db.bm_node_destroy(self.context, self.ids[0])
+
+ r = db.bm_node_get(self.context, self.ids[0])
+ self.assertTrue(r is None)
+
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(len(r), 5)
+
+ def test_find_free(self):
+ self._create_nodes()
+ fn = db.bm_node_find_free(self.context, 'host2')
+ self.assertEqual(fn['pm_address'], '2')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=500, cpus=2, local_gb=100)
+ self.assertEqual(fn['pm_address'], '3')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=1001, cpus=1, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '4')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=1, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '4')
+
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=2, local_gb=1000)
+ self.assertEqual(fn['pm_address'], '5')
+
+ # check memory_mb
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2001, cpus=2, local_gb=1000)
+ self.assertTrue(fn is None)
+
+ # check cpus
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=3, local_gb=1000)
+ self.assertTrue(fn is None)
+
+ # check local_gb
+ fn = db.bm_node_find_free(self.context, 'host2',
+ memory_mb=2000, cpus=2, local_gb=1001)
+ self.assertTrue(fn is None)
diff --git a/nova/tests/baremetal/db/test_bm_pxe_ip.py b/nova/tests/baremetal/db/test_bm_pxe_ip.py
new file mode 100644
index 000000000..9a93b46ad
--- /dev/null
+++ b/nova/tests/baremetal/db/test_bm_pxe_ip.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Bare-metal DB testcase for BareMetalPxeIp
+"""
+
+from nova import exception
+from nova.tests.baremetal.db import base
+from nova.tests.baremetal.db import utils
+from nova.virt.baremetal import db
+
+
+class BareMetalPxeIpTestCase(base.BMDBTestCase):
+
+ def _create_pxe_ip(self):
+ i1 = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.101')
+ i2 = utils.new_bm_pxe_ip(address='10.1.1.2',
+ server_address='10.1.1.102')
+
+ i1_ref = db.bm_pxe_ip_create_direct(self.context, i1)
+ self.assertTrue(i1_ref['id'] is not None)
+ self.assertEqual(i1_ref['address'], '10.1.1.1')
+ self.assertEqual(i1_ref['server_address'], '10.1.1.101')
+
+ i2_ref = db.bm_pxe_ip_create_direct(self.context, i2)
+ self.assertTrue(i2_ref['id'] is not None)
+ self.assertEqual(i2_ref['address'], '10.1.1.2')
+ self.assertEqual(i2_ref['server_address'], '10.1.1.102')
+
+ self.i1 = i1_ref
+ self.i2 = i2_ref
+
+ def test_unuque_address(self):
+ self._create_pxe_ip()
+
+ # address duplicates
+ i = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.201')
+ self.assertRaises(exception.DBError,
+ db.bm_pxe_ip_create_direct,
+ self.context, i)
+
+ # server_address duplicates
+ i = utils.new_bm_pxe_ip(address='10.1.1.3',
+ server_address='10.1.1.101')
+ self.assertRaises(exception.DBError,
+ db.bm_pxe_ip_create_direct,
+ self.context, i)
+
+ db.bm_pxe_ip_destroy(self.context, self.i1['id'])
+ i = utils.new_bm_pxe_ip(address='10.1.1.1',
+ server_address='10.1.1.101')
+ ref = db.bm_pxe_ip_create_direct(self.context, i)
+ self.assertTrue(ref is not None)
+
+ def test_bm_pxe_ip_associate(self):
+ self._create_pxe_ip()
+ node = db.bm_node_create(self.context, utils.new_bm_node())
+ ip_id = db.bm_pxe_ip_associate(self.context, node['id'])
+ ref = db.bm_pxe_ip_get(self.context, ip_id)
+ self.assertEqual(ref['bm_node_id'], node['id'])
+
+ def test_bm_pxe_ip_associate_raise(self):
+ self._create_pxe_ip()
+ node_id = 123
+ self.assertRaises(exception.NovaException,
+ db.bm_pxe_ip_associate,
+ self.context, node_id)
+
+ def test_delete_by_address(self):
+ self._create_pxe_ip()
+ db.bm_pxe_ip_destroy_by_address(self.context, '10.1.1.1')
+ del_ref = db.bm_pxe_ip_get(self.context, self.i1['id'])
+ self.assertTrue(del_ref is None)
+
+ def test_delete_by_address_not_exist(self):
+ self._create_pxe_ip()
+ del_ref = db.bm_pxe_ip_destroy_by_address(self.context, '10.11.12.13')
+ self.assertTrue(del_ref is None)
diff --git a/nova/tests/baremetal/db/utils.py b/nova/tests/baremetal/db/utils.py
new file mode 100644
index 000000000..800305402
--- /dev/null
+++ b/nova/tests/baremetal/db/utils.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Bare-metal test utils."""
+
+from nova import test
+from nova.virt.baremetal.db.sqlalchemy import models as bm_models
+
+
+def new_bm_node(**kwargs):
+ h = bm_models.BareMetalNode()
+ h.id = kwargs.pop('id', None)
+ h.service_host = kwargs.pop('service_host', None)
+ h.instance_uuid = kwargs.pop('instance_uuid', None)
+ h.cpus = kwargs.pop('cpus', 1)
+ h.memory_mb = kwargs.pop('memory_mb', 1024)
+ h.local_gb = kwargs.pop('local_gb', 64)
+ h.pm_address = kwargs.pop('pm_address', '192.168.1.1')
+ h.pm_user = kwargs.pop('pm_user', 'ipmi_user')
+ h.pm_password = kwargs.pop('pm_password', 'ipmi_password')
+ h.prov_mac_address = kwargs.pop('prov_mac_address', '12:34:56:78:90:ab')
+ h.registration_status = kwargs.pop('registration_status', 'done')
+ h.task_state = kwargs.pop('task_state', None)
+ h.prov_vlan_id = kwargs.pop('prov_vlan_id', None)
+ h.terminal_port = kwargs.pop('terminal_port', 8000)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return h
+
+
+def new_bm_pxe_ip(**kwargs):
+ x = bm_models.BareMetalPxeIp()
+ x.id = kwargs.pop('id', None)
+ x.address = kwargs.pop('address', None)
+ x.server_address = kwargs.pop('server_address', None)
+ x.bm_node_id = kwargs.pop('bm_node_id', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
+
+
+def new_bm_interface(**kwargs):
+ x = bm_models.BareMetalInterface()
+ x.id = kwargs.pop('id', None)
+ x.bm_node_id = kwargs.pop('bm_node_id', None)
+ x.address = kwargs.pop('address', None)
+ x.datapath_id = kwargs.pop('datapath_id', None)
+ x.port_no = kwargs.pop('port_no', None)
+ x.vif_uuid = kwargs.pop('vif_uuid', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
+
+
+def new_bm_deployment(**kwargs):
+ x = bm_models.BareMetalDeployment()
+ x.id = kwargs.pop('id', None)
+ x.key = kwargs.pop('key', None)
+ x.image_path = kwargs.pop('image_path', None)
+ x.pxe_config_path = kwargs.pop('pxe_config_path', None)
+ x.root_mb = kwargs.pop('root_mb', None)
+ x.swap_mb = kwargs.pop('swap_mb', None)
+ if len(kwargs) > 0:
+ raise test.TestingException("unknown field: %s"
+ % ','.join(kwargs.keys()))
+ return x
diff --git a/nova/tests/baremetal/test_proxy_bare_metal.py b/nova/tests/baremetal/test_proxy_bare_metal.py
deleted file mode 100644
index e9184ee5d..000000000
--- a/nova/tests/baremetal/test_proxy_bare_metal.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 University of Southern California
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import __builtin__
-
-import functools
-import mox
-import StringIO
-
-from nova.compute import power_state
-from nova import exception
-from nova import flags
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests import fake_utils
-
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import driver
-
-
-FLAGS = flags.FLAGS
-
-
-# Same fake_domains is used by different classes,
-# but different fake_file is used by different classes for unit test.
-fake_domains = [{'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}]
-
-
-class DomainReadWriteTestCase(test.TestCase):
-
- def setUp(self):
- super(DomainReadWriteTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
-
- def test_read_domain_with_empty_list(self):
- """Read a file that contains no domains"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- fake_file = StringIO.StringIO('[]')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, [])
-
- def test_read_domain(self):
- """Read a file that contains at least one domain"""
- fake_file = StringIO.StringIO('''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]''')
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file', 'r').AndReturn(fake_file)
-
- self.mox.ReplayAll()
-
- domains = dom.read_domains('/tftpboot/test_fake_dom_file')
-
- self.assertEqual(domains, fake_domains)
-
- def test_read_no_file(self):
- """Try to read when the file does not exist
-
- This should through and IO exception"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- open('/tftpboot/test_fake_dom_file',
- 'r').AndRaise(IOError(2, 'No such file or directory',
- '/tftpboot/test_fake_dom_file'))
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NotFound, dom.read_domains,
- '/tftpboot/test_fake_dom_file')
-
- def assertJSONEquals(self, x, y):
- """Check if two json strings represent the equivalent Python object"""
- self.assertEquals(jsonutils.loads(x), jsonutils.loads(y))
- return jsonutils.loads(x) == jsonutils.loads(y)
-
- def test_write_domain(self):
- """Write the domain to file"""
- self.mox.StubOutWithMock(__builtin__, 'open')
- mock_file = self.mox.CreateMock(file)
- expected_json = '''[{"status": 1,
- "image_id": "1552326678", "vcpus": 1, "node_id": 6,
- "name": "instance-00000001", "memory_kb": 16777216,
- "mac_address": "02:16:3e:01:4e:c9", "kernel_id": "1896115634",
- "ramdisk_id": "", "ip_address": "10.5.1.2"}]'''
- open('/tftpboot/test_fake_dom_file', 'w').AndReturn(mock_file)
-
- # Check if the argument to file.write() represents the same
- # Python object as expected_json
- # We can't do an exact string comparison
- # because of ordering and whitespace
- mock_file.write(mox.Func(functools.partial(self.assertJSONEquals,
- expected_json)))
- mock_file.close()
-
- self.mox.ReplayAll()
-
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
-
-class BareMetalDomTestCase(test.TestCase):
-
- def setUp(self):
- super(BareMetalDomTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- # Stub out utils.execute
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def tearDown(self):
- super(BareMetalDomTestCase, self).tearDown()
-
- # Reset the singleton state
- dom.BareMetalDom._instance = None
- dom.BareMetalDom._is_init = False
-
- def test_read_domain_only_once(self):
- """Confirm that the domain is read from a file only once,
- even if the object is instantiated multiple times"""
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Instantiate multiple instances
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
- x = dom.BareMetalDom()
-
- def test_init_no_domains(self):
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn([])
- dom.write_domains('/tftpboot/test_fake_dom_file', [])
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expectd values
- self.assertEqual(bmdom.fake_dom_nums, 0)
-
- def test_init_remove_non_running_domain(self):
- """Check to see that all entries in the domain list are removed
- except for the one that is in the running state"""
-
- domains = [dict(node_id=1, name='i-00000001',
- status=power_state.NOSTATE),
- dict(node_id=2, name='i-00000002', status=power_state.RUNNING),
- dict(node_id=3, name='i-00000003', status=power_state.PAUSED),
- dict(node_id=5, name='i-00000004', status=power_state.SHUTDOWN),
- dict(node_id=7, name='i-00000005', status=power_state.CRASHED),
- dict(node_id=8, name='i-00000006', status=power_state.SUSPENDED),
- dict(node_id=9, name='i-00000007', status=power_state.NOSTATE)]
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
- dom.read_domains('/tftpboot/test_fake_dom_file').AndReturn(domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- self.assertEqual(bmdom.domains, [{'node_id': 2,
- 'name': 'i-00000002',
- 'status': power_state.RUNNING}])
- self.assertEqual(bmdom.fake_dom_nums, 1)
-
- def test_find_domain(self):
- domain = {'status': 1, 'name': 'instance-00000001',
- 'memory_kb': 16777216, 'kernel_id': '1896115634',
- 'ramdisk_id': '', 'image_id': '1552326678',
- 'vcpus': 1, 'node_id': 6,
- 'mac_address': '02:16:3e:01:4e:c9',
- 'ip_address': '10.5.1.2'}
-
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- bmdom = dom.BareMetalDom()
-
- # Expected values
- self.assertEquals(bmdom.find_domain('instance-00000001'), domain)
-
-
-class BareMetalTestCase(test.TestCase):
-
- test_ip = '10.11.12.13'
- test_instance = {'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'mac_address': '02:12:34:46:56:67',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '123456',
- 'instance_type_id': '5'} # m1.small
-
- def setUp(self):
- super(BareMetalTestCase, self).setUp()
- self.flags(baremetal_driver='fake')
- fake_utils.stub_out_utils_execute(self.stubs)
-
- def test_get_info(self):
- # Create the mock objects
- self.mox.StubOutWithMock(dom, 'read_domains')
- self.mox.StubOutWithMock(dom, 'write_domains')
-
- # Expected calls
- dom.read_domains('/tftpboot/'
- 'test_fake_dom_file').AndReturn(fake_domains)
- dom.write_domains('/tftpboot/test_fake_dom_file', fake_domains)
-
- self.mox.ReplayAll()
-
- # Code under test
- conn = driver.BareMetalDriver(None, True)
- # TODO(mikalstill): this is not a very good fake instance
- info = conn.get_info({'name': 'instance-00000001'})
-
- # Expected values
- self.assertEquals(info['mem'], 16777216)
- self.assertEquals(info['state'], 1)
- self.assertEquals(info['num_cpu'], 1)
- self.assertEquals(info['cpu_time'], 100)
- self.assertEquals(info['max_mem'], 16777216)
diff --git a/nova/tests/baremetal/test_tilera.py b/nova/tests/baremetal/test_tilera.py
deleted file mode 100644
index b86e6c9c1..000000000
--- a/nova/tests/baremetal/test_tilera.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import __builtin__
-import StringIO
-
-from nova import test
-from nova.virt.baremetal import tilera
-
-
-class TileraBareMetalNodesTestCase(test.TestCase):
-
- def setUp(self):
- super(TileraBareMetalNodesTestCase, self).setUp()
- self.board_info = "\n".join([
-'# board_id ip_address mac_address 00:1A:CA:00:57:90 '
-'00:1A:CA:00:58:98 00:1A:CA:00:58:50',
-'6 10.0.2.7 00:1A:CA:00:58:5C 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'7 10.0.2.8 00:1A:CA:00:58:A4 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'8 10.0.2.9 00:1A:CA:00:58:1A 10 16218 917 476 1 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}',
-'9 10.0.2.10 00:1A:CA:00:58:38 10 16385 1000 0 0 tilera_hv 1 '
-'{"vendor":"tilera","model":"TILEmpower","arch":"TILEPro64",'
- '"features":["8x8Grid","32bVLIW","5.6MBCache","443BOPS","37TbMesh",'
- '"700MHz-866MHz","4DDR2","2XAUIMAC/PHY","2GbEMAC"],'
- '"topology":{"cores":"64"}}'])
-
- def tearDown(self):
- super(TileraBareMetalNodesTestCase, self).tearDown()
-
- # Reset the singleton state
- tilera.BareMetalNodes._instance = None
- tilera.BareMetalNodes._is_init = False
-
- def test_singleton(self):
- """Confirm that the object acts like a singleton.
-
- In this case, we check that it only loads the config file once,
- even though it has been instantiated multiple times"""
-
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
-
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
- nodes = tilera.BareMetalNodes("/tftpboot/tilera_boards")
-
- def test_get_hw_info(self):
- self.mox.StubOutWithMock(__builtin__, 'open')
-
- open("/tftpboot/tilera_boards",
- "r").AndReturn(StringIO.StringIO(self.board_info))
-
- self.mox.ReplayAll()
- nodes = tilera.BareMetalNodes()
- self.assertEqual(nodes.get_hw_info('vcpus'), 10)
diff --git a/nova/tests/cert/test_rpcapi.py b/nova/tests/cert/test_rpcapi.py
index 58b07ff75..854c49c9f 100644
--- a/nova/tests/cert/test_rpcapi.py
+++ b/nova/tests/cert/test_rpcapi.py
@@ -19,13 +19,12 @@ Unit Tests for nova.cert.rpcapi
"""
from nova.cert import rpcapi as cert_rpcapi
+from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class CertRpcAPITestCase(test.TestCase):
@@ -54,7 +53,7 @@ class CertRpcAPITestCase(test.TestCase):
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
- self.assertEqual(self.call_topic, FLAGS.cert_topic)
+ self.assertEqual(self.call_topic, CONF.cert_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index f631c1665..b780420ec 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -26,15 +26,27 @@ from nova import test
LOG = logging.getLogger(__name__)
+class DummyTracker(object):
+ icalled = False
+ rcalled = False
+
+ def abort_instance_claim(self, *args, **kwargs):
+ self.icalled = True
+
+ def abort_resize_claim(self, *args, **kwargs):
+ self.rcalled = True
+
+
class ClaimTestCase(test.TestCase):
def setUp(self):
super(ClaimTestCase, self).setUp()
self.resources = self._fake_resources()
+ self.tracker = DummyTracker()
def _claim(self, **kwargs):
instance = self._fake_instance(**kwargs)
- return claims.Claim(instance, None)
+ return claims.Claim(instance, self.tracker)
def _fake_instance(self, **kwargs):
instance = {
@@ -47,6 +59,18 @@ class ClaimTestCase(test.TestCase):
instance.update(**kwargs)
return instance
+ def _fake_instance_type(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': 1,
+ 'vcpus': 1,
+ 'root_gb': 1,
+ 'ephemeral_gb': 2
+ }
+ instance_type.update(**kwargs)
+ return instance_type
+
def _fake_resources(self, values=None):
resources = {
'memory_mb': 2048,
@@ -109,17 +133,30 @@ class ClaimTestCase(test.TestCase):
self.assertFalse(claim.test(self.resources, limits))
def test_abort(self):
- instance = self._fake_instance(root_gb=10, ephemeral_gb=40)
+ claim = self._abort()
+ self.assertTrue(claim.tracker.icalled)
- def fake_abort(self):
- self._called = True
-
- self.stubs.Set(claims.Claim, 'abort', fake_abort)
+ def _abort(self):
claim = None
try:
- with claims.Claim(instance, None) as claim:
+ with self._claim(memory_mb=4096) as claim:
raise test.TestingException("abort")
except test.TestingException:
pass
- self.assertTrue(claim._called)
+ return claim
+
+
+class ResizeClaimTestCase(ClaimTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+ self.instance = self._fake_instance()
+
+ def _claim(self, **kwargs):
+ instance_type = self._fake_instance_type(**kwargs)
+ return claims.ResizeClaim(self.instance, instance_type, self.tracker)
+
+ def test_abort(self):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.rcalled)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 1c5be489d..5598da0db 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -23,6 +23,7 @@ import copy
import datetime
import sys
import time
+import uuid
import mox
@@ -40,7 +41,6 @@ from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.network import api as network_api
from nova.network import model as network_model
from nova.openstack.common import importutils
@@ -60,19 +60,22 @@ from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
+from nova.tests import matchers
from nova import utils
+from nova.virt import fake
from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+
def nop_report_driver_status(self):
pass
@@ -101,12 +104,13 @@ class BaseTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ fake.set_nodes([NODENAME])
+ self.compute = importutils.import_object(CONF.compute_manager)
# override tracker with a version that doesn't need the database:
- self.compute.resource_tracker = \
- fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver)
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
self.compute.update_available_resource(
context.get_admin_context())
@@ -137,6 +141,7 @@ class BaseTestCase(test.TestCase):
notifier_api._reset_drivers()
for instance in instances:
db.instance_destroy(self.context.elevated(), instance['uuid'])
+ fake.restore_nodes()
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
@@ -152,6 +157,7 @@ class BaseTestCase(test.TestCase):
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
type_id = instance_types.get_instance_type_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -210,7 +216,7 @@ class ComputeTestCase(BaseTestCase):
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
- self.rt = self.compute.resource_tracker
+ self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
super(ComputeTestCase, self).tearDown()
@@ -358,7 +364,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -385,7 +391,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_mem_mb = resources['memory_mb']
oversub_limit_mb = total_mem_mb * 1.5
@@ -412,7 +418,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties = {'limits': limits}
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
self.assertEqual(1, resources['vcpus'])
# build an instance, specifying an amount of memory that exceeds
@@ -459,7 +465,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -485,7 +491,7 @@ class ComputeTestCase(BaseTestCase):
self.rt.update_available_resource(self.context.elevated())
# get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource()
+ resources = self.compute.driver.get_available_resource(NODENAME)
total_disk_gb = resources['local_gb']
oversub_limit_gb = total_disk_gb * 1.5
@@ -1767,7 +1773,7 @@ class ComputeTestCase(BaseTestCase):
migration_ref = db.migration_get_by_instance_and_status(
self.context.elevated(), instance['uuid'], 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
- migration=migration_ref, image={})
+ migration=migration_ref, image={}, instance_type=new_type)
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
@@ -1890,7 +1896,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration_ref, image={},
- reservations=reservations)
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
@@ -1912,8 +1919,8 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(self.context, instance['uuid'],
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(self.context, instance=instance,
- migration=migration_ref,
- image={})
+ migration=migration_ref, image={},
+ instance_type=jsonutils.to_primitive(instance_type))
inst = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(migration_ref['dest_compute'], inst['host'])
@@ -1946,9 +1953,10 @@ class ComputeTestCase(BaseTestCase):
new_instance_type_ref = db.instance_type_get_by_flavor_id(
self.context, 3)
+ new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
self.compute.prep_resize(self.context,
instance=jsonutils.to_primitive(new_inst_ref),
- instance_type=jsonutils.to_primitive(new_instance_type_ref),
+ instance_type=new_instance_type_p,
image={}, reservations=reservations)
migration_ref = db.migration_get_by_instance_and_status(
@@ -1960,7 +1968,8 @@ class ComputeTestCase(BaseTestCase):
{"task_state": task_states.RESIZE_PREP})
self.compute.resize_instance(self.context, instance=instance,
migration=migration_ref,
- image={})
+ image={},
+ instance_type=new_instance_type_p)
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=instance)
@@ -2048,7 +2057,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=inst_ref,
migration=migration_ref, image={},
- reservations=reservations)
+ reservations=reservations,
+ instance_type=jsonutils.to_primitive(instance_type))
inst_ref = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
self.compute.terminate_instance(self.context,
@@ -2091,7 +2101,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2120,7 +2130,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2151,7 +2161,7 @@ class ComputeTestCase(BaseTestCase):
self.compute._get_compute_info(
self.context, inst_ref['host']).AndReturn(compute_info)
self.compute._get_compute_info(
- self.context, FLAGS.host).AndReturn(compute_info)
+ self.context, CONF.host).AndReturn(compute_info)
self.compute.driver.check_can_live_migrate_destination(self.context,
inst_ref,
compute_info, compute_info,
@@ -2218,7 +2228,7 @@ class ComputeTestCase(BaseTestCase):
inst_id = instance['id']
c = context.get_admin_context()
- topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
+ topic = rpc.queue_get_for(c, CONF.compute_topic, instance['host'])
# creating volume testdata
volume_id = 'fake'
@@ -2290,7 +2300,7 @@ class ComputeTestCase(BaseTestCase):
# create
self.mox.StubOutWithMock(rpc, 'call')
- topic = rpc.queue_get_for(c, FLAGS.compute_topic, instance['host'])
+ topic = rpc.queue_get_for(c, CONF.compute_topic, instance['host'])
rpc.call(c, topic,
{"method": "pre_live_migration",
"args": {'instance': instance,
@@ -2334,7 +2344,7 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
self.mox.StubOutWithMock(rpc, 'call')
- rpc.call(c, rpc.queue_get_for(c, FLAGS.compute_topic, dest),
+ rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance': inst_ref, 'block_migration': False},
"version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
@@ -2387,7 +2397,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('test'))
@@ -2415,7 +2425,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault_with_remote_error(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('Remote error'))
@@ -2444,7 +2454,7 @@ class ComputeTestCase(BaseTestCase):
def test_add_instance_fault_user_error(self):
exc_info = None
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
@@ -2470,7 +2480,7 @@ class ComputeTestCase(BaseTestCase):
user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
- instance_uuid = str(utils.gen_uuid())
+ instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
expected = {
@@ -2538,7 +2548,7 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(timeutils, 'is_older_than')
timeutils.is_older_than('sometimeago',
- FLAGS.running_deleted_instance_timeout).AndReturn(True)
+ CONF.running_deleted_instance_timeout).AndReturn(True)
self.mox.StubOutWithMock(self.compute.db, "instance_get_all_by_host")
self.compute.db.instance_get_all_by_host('context',
@@ -2558,7 +2568,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host}
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host}
instances.append(instance_map[uuid])
call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
@@ -2662,7 +2672,7 @@ class ComputeTestCase(BaseTestCase):
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute):
- self.assertEqual(dest_compute, FLAGS.host)
+ self.assertEqual(dest_compute, CONF.host)
return migrations
def fake_migration_update(context, migration_id, values):
@@ -2724,7 +2734,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2756,7 +2766,7 @@ class ComputeTestCase(BaseTestCase):
instances = []
for x in xrange(5):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2789,7 +2799,7 @@ class ComputeTestCase(BaseTestCase):
#expired instances
for x in xrange(4):
uuid = 'fake-uuid-%s' % x
- instance_map[uuid] = {'uuid': uuid, 'host': FLAGS.host,
+ instance_map[uuid] = {'uuid': uuid, 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': created_at}
instances.append(instance_map[uuid])
@@ -2798,7 +2808,7 @@ class ComputeTestCase(BaseTestCase):
uuid = 'fake-uuid-5'
instance_map[uuid] = {
'uuid': uuid,
- 'host': FLAGS.host,
+ 'host': CONF.host,
'vm_state': vm_states.BUILDING,
'created_at': timeutils.utcnow(),
}
@@ -3026,7 +3036,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_default_hostname_generator(self):
- fake_uuids = [str(utils.gen_uuid()) for x in xrange(4)]
+ fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
orig_populate = self.compute_api._populate_instance_for_create
@@ -3154,7 +3164,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.compute_api.delete(self.context, instance)
@@ -3165,7 +3175,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_in_resized(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance['vm_state'] = vm_states.RESIZED
@@ -3189,7 +3199,7 @@ class ComputeAPITestCase(BaseTestCase):
old_time = datetime.datetime(2012, 4, 1)
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
timeutils.set_time_override(old_time)
self.compute_api.delete(self.context, instance)
timeutils.clear_time_override()
@@ -3220,7 +3230,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(QUOTAS, 'commit', fake_commit)
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.compute_api.delete(self.context, instance)
self.compute_api.delete(self.context, instance)
@@ -3240,7 +3250,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_handles_host_setting_race_condition(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance['host'] = None # make it think host was never set
self.compute_api.delete(self.context, instance)
@@ -3251,7 +3261,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_fail(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
@@ -3264,7 +3274,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg())
@@ -3279,7 +3289,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft_fail(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_update(self.context, instance_uuid,
{'disable_terminate': True})
@@ -3292,7 +3302,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_delete_soft_rollback(self):
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), mox.IgnoreArg())
@@ -3314,7 +3324,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_force_delete(self):
"""Ensure instance can be deleted after a soft delete"""
instance = jsonutils.to_primitive(self._create_fake_instance(params={
- 'host': FLAGS.host}))
+ 'host': CONF.host}))
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3406,7 +3416,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_restore(self):
"""Ensure instance can be restored from a soft delete"""
instance, instance_uuid = self._run_instance(params={
- 'host': FLAGS.host})
+ 'host': CONF.host})
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.compute_api.soft_delete(self.context, instance)
@@ -3969,10 +3979,11 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance(dict(host='host2'))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
+ instance['instance_type']['extra_specs'] = []
orig_instance_type = instance['instance_type']
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
- # the compute manager is using a cached copy of FLAGS.host,
+ # the compute manager is using a cached copy of CONF.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
@@ -4007,7 +4018,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = jsonutils.to_primitive(instance)
self.compute.run_instance(self.context, instance=instance)
# We need to set the host to something 'known'. Unfortunately,
- # the compute manager is using a cached copy of FLAGS.host,
+ # the compute manager is using a cached copy of CONF.host,
# so we can't just self.flags(host='host2') before calling
# run_instance above. Also, set progress to 10 so we ensure
# it is reset to 0 in compute_api.resize(). (verified in
@@ -4499,7 +4510,7 @@ class ComputeAPITestCase(BaseTestCase):
]
bdms.sort()
expected_result.sort()
- self.assertDictListMatch(bdms, expected_result)
+ self.assertThat(bdms, matchers.DictListMatches(expected_result))
self.compute_api._update_block_device_mapping(
self.context, instance_types.get_default_instance_type(),
@@ -4535,7 +4546,7 @@ class ComputeAPITestCase(BaseTestCase):
{'no_device': True, 'device_name': '/dev/sdd4'}]
bdms.sort()
expected_result.sort()
- self.assertDictListMatch(bdms, expected_result)
+ self.assertThat(bdms, matchers.DictListMatches(expected_result))
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid']):
@@ -4628,7 +4639,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, i_ref['uuid'])
def test_add_remove_fixed_ip(self):
- instance = self._create_fake_instance(params={'host': FLAGS.host})
+ instance = self._create_fake_instance(params={'host': CONF.host})
self.compute_api.add_fixed_ip(self.context, instance, '1')
self.compute_api.remove_fixed_ip(self.context, instance, '192.168.1.1')
self.compute_api.delete(self.context, instance)
@@ -4667,7 +4678,7 @@ class ComputeAPITestCase(BaseTestCase):
rpc.call(self.context, 'compute.%s' % fake_instance['host'],
rpc_msg1, None).AndReturn(fake_connect_info2)
- rpc.call(self.context, FLAGS.consoleauth_topic,
+ rpc.call(self.context, CONF.consoleauth_topic,
rpc_msg2, None).AndReturn(None)
self.mox.ReplayAll()
@@ -4685,6 +4696,23 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_get_backdoor_port(self):
+ """Test api call to get backdoor_port"""
+ fake_backdoor_port = 59697
+
+ self.mox.StubOutWithMock(rpc, 'call')
+
+ rpc_msg = {'method': 'get_backdoor_port',
+ 'args': {},
+ 'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION}
+ rpc.call(self.context, 'compute.fake_host', rpc_msg,
+ None).AndReturn(fake_backdoor_port)
+
+ self.mox.ReplayAll()
+
+ port = self.compute_api.get_backdoor_port(self.context, 'fake_host')
+ self.assertEqual(port, fake_backdoor_port)
+
def test_console_output(self):
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -4796,7 +4824,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertTrue(result["detached"])
def test_inject_network_info(self):
- instance = self._create_fake_instance(params={'host': FLAGS.host})
+ instance = self._create_fake_instance(params={'host': CONF.host})
self.compute.run_instance(self.context,
instance=jsonutils.to_primitive(instance))
instance = self.compute_api.get(self.context, instance['uuid'])
@@ -4871,7 +4899,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4900,7 +4928,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4941,7 +4969,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -4962,7 +4990,7 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
self.mox.StubOutWithMock(rpc, 'cast')
- topic = rpc.queue_get_for(self.context, FLAGS.compute_topic,
+ topic = rpc.queue_get_for(self.context, CONF.compute_topic,
instance['host'])
rpc.cast(self.context, topic,
{"method": "refresh_instance_security_rules",
@@ -5046,7 +5074,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
metadata['foo_key1'] = None
expected = self.api.update_aggregate_metadata(self.context,
aggr['id'], metadata)
- self.assertDictMatch(expected['metadata'], {'foo_key2': 'foo_value2'})
+ self.assertThat(expected['metadata'],
+ matchers.DictMatches({'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
"""Ensure we can delete an aggregate."""
@@ -5134,6 +5163,19 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.context, aggr['id'], 'invalid_host')
+class ComputeBackdoorPortTestCase(BaseTestCase):
+ """This is for unit test coverage of backdoor port rpc"""
+
+ def setUp(self):
+ super(ComputeBackdoorPortTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.compute.backdoor_port = 59697
+
+ def test_get_backdoor_port(self):
+ port = self.compute.get_backdoor_port(self.context)
+ self.assertEqual(port, self.compute.backdoor_port)
+
+
class ComputeAggrTestCase(BaseTestCase):
"""This is for unit coverage of aggregate-related methods
defined in nova.compute.manager."""
@@ -5165,7 +5207,8 @@ class ComputeAggrTestCase(BaseTestCase):
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
fake_driver_remove_from_aggregate)
- self.compute.remove_aggregate_host(self.context, self.aggr.id, "host")
+ self.compute.remove_aggregate_host(self.context,
+ aggregate=jsonutils.to_primitive(self.aggr), host="host")
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
@@ -5185,7 +5228,7 @@ class ComputeAggrTestCase(BaseTestCase):
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEquals(self.context, context)
- self.assertEquals(aggregate.id, self.aggr.id)
+ self.assertEquals(aggregate['id'], self.aggr.id)
self.assertEquals(host, "the_host")
self.assertEquals("SLAVE_INFO", kwargs.get("slave_info"))
@@ -5193,7 +5236,8 @@ class ComputeAggrTestCase(BaseTestCase):
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
- self.aggr.id, "the_host", slave_info="SLAVE_INFO")
+ aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
+ slave_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
@@ -5453,7 +5497,7 @@ class KeypairAPITestCase(BaseTestCase):
def test_create_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
- return FLAGS.quota_key_pairs
+ return CONF.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.create_key_pair,
@@ -5487,7 +5531,7 @@ class KeypairAPITestCase(BaseTestCase):
def test_import_keypair_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
- return FLAGS.quota_key_pairs
+ return CONF.quota_key_pairs
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
self.assertRaises(exception.KeypairLimitExceeded,
self.keypair_api.import_key_pair,
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 056450708..53a5b20ae 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -21,10 +21,10 @@ import string
from nova.compute import instance_types
from nova.compute import utils as compute_utils
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.network import api as network_api
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -35,9 +35,8 @@ from nova.tests import fake_network
import nova.tests.image.fake
from nova import utils
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ComputeValidateDeviceTestCase(test.TestCase):
@@ -217,7 +216,7 @@ class UsageInfoTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
notification_driver=[test_notifier.__name__],
network_manager='nova.network.manager.FlatManager')
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
new file mode 100644
index 000000000..830897954
--- /dev/null
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -0,0 +1,99 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service with multiple compute nodes"""
+
+import mox
+
+from nova import config
+from nova import context
+from nova import exception
+from nova.openstack.common import importutils
+from nova import test
+from nova.virt import fake
+
+
+CONF = config.CONF
+
+
+class BaseTestCase(test.TestCase):
+ def tearDown(self):
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+
+class FakeDriverSingleNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverSingleNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['xyz'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, dict))
+ self.assertEqual(stats['hypervisor_hostname'], 'xyz')
+
+ def test_get_available_resource(self):
+ res = self.driver.get_available_resource('xyz')
+ self.assertEqual(res['hypervisor_hostname'], 'xyz')
+
+
+class FakeDriverMultiNodeTestCase(BaseTestCase):
+ def setUp(self):
+ super(FakeDriverMultiNodeTestCase, self).setUp()
+ self.driver = fake.FakeDriver(virtapi=None)
+ fake.set_nodes(['aaa', 'bbb'])
+
+ def test_get_host_stats(self):
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 2)
+ self.assertEqual(stats[0]['hypervisor_hostname'], 'aaa')
+ self.assertEqual(stats[1]['hypervisor_hostname'], 'bbb')
+
+ def test_get_available_resource(self):
+ res_a = self.driver.get_available_resource('aaa')
+ self.assertEqual(res_a['hypervisor_hostname'], 'aaa')
+
+ res_b = self.driver.get_available_resource('bbb')
+ self.assertEqual(res_b['hypervisor_hostname'], 'bbb')
+
+ self.assertRaises(exception.NovaException,
+ self.driver.get_available_resource, 'xxx')
+
+
+class MultiNodeComputeTestCase(BaseTestCase):
+ def setUp(self):
+ super(MultiNodeComputeTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver')
+ self.compute = importutils.import_object(CONF.compute_manager)
+
+ def test_update_available_resource_add_remove_node(self):
+ ctx = context.get_admin_context()
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
+
+ fake.set_nodes(['A', 'B'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B'])
+
+ fake.set_nodes(['A', 'B', 'C'])
+ self.compute.update_available_resource(ctx)
+ self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
+ ['A', 'B', 'C'])
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 64cdb8d53..5a7fcac2b 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -19,6 +19,8 @@
import uuid
+from nova.compute import claims
+from nova.compute import instance_types
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
@@ -40,10 +42,14 @@ FAKE_VIRT_VCPUS = 1
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver"""
+
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
- def get_available_resource(self):
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
@@ -59,7 +65,10 @@ class FakeVirtDriver(driver.ComputeDriver):
self.memory_mb_used = 0
self.local_gb_used = 0
- def get_available_resource(self):
+ def get_host_ip_addr(self):
+ return '127.0.0.1'
+
+ def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
@@ -83,13 +92,18 @@ class BaseTestCase(test.TestCase):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
- self.context = context.RequestContext('fake', 'fake')
+ self.context = context.get_admin_context()
self._instances = {}
- self.stubs.Set(db, 'instance_get_all_by_host',
- lambda c, h: self._instances.values())
+ self._instance_types = {}
+
+ self.stubs.Set(db, 'instance_get_all_by_host_and_node',
+ self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
+ self.stubs.Set(db, 'instance_type_get', self._fake_instance_type_get)
+
+ self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
@@ -106,7 +120,8 @@ class BaseTestCase(test.TestCase):
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
- "stats": [{"key": "num_instances", "value": "1"}]
+ "stats": [{"key": "num_instances", "value": "1"}],
+ "hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
@@ -130,7 +145,7 @@ class BaseTestCase(test.TestCase):
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
- 'vm_state': vm_states.BUILDING,
+ 'vm_state': vm_states.RESIZED,
'task_state': None,
'memory_mb': 2,
'root_gb': 3,
@@ -139,12 +154,35 @@ class BaseTestCase(test.TestCase):
'project_id': '123456',
'vcpus': 1,
'host': None,
+ 'instance_type_id': 1,
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
+ def _fake_instance_type_create(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'vcpus': FAKE_VIRT_VCPUS,
+ 'root_gb': FAKE_VIRT_LOCAL_GB / 2,
+ 'ephemeral_gb': FAKE_VIRT_LOCAL_GB / 2,
+ 'flavorid': 'fakeflavor'
+ }
+ instance_type.update(**kwargs)
+
+ id_ = instance_type['id']
+ self._instance_types[id_] = instance_type
+ return instance_type
+
+ def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
+ return [i for i in self._instances.values() if i['host'] == host]
+
+ def _fake_instance_type_get(self, ctxt, id_):
+ return self._instance_types[id_]
+
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
@@ -153,15 +191,19 @@ class BaseTestCase(test.TestCase):
# only used in the subsequent notification:
return (instance, instance)
- def _tracker(self, unsupported=False):
- host = "fakehost"
+ def _tracker(self, host=None, unsupported=False):
+
+ if host is None:
+ host = self.host
+
+ node = "fakenode"
if unsupported:
driver = UnsupportedVirtDriver()
else:
driver = FakeVirtDriver()
- tracker = resource_tracker.ResourceTracker(host, driver)
+ tracker = resource_tracker.ResourceTracker(host, driver, node)
return tracker
@@ -204,6 +246,23 @@ class UnsupportedDriverTestCase(BaseTestCase):
root_gb=10)
self.tracker.update_usage(self.context, instance)
+ def testDisabledResizeClaim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_instance_type_create()
+ claim = self.tracker.resize_claim(self.context, instance,
+ instance_type)
+ self.assertEqual(0, claim.memory_mb)
+ self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
+ self.assertEqual(instance_type['id'],
+ claim.migration['new_instance_type_id'])
+
+ def testDisabledResizeContextClaim(self):
+ instance = self._fake_instance()
+ instance_type = self._fake_instance_type_create()
+ with self.tracker.resize_claim(self.context, instance, instance_type) \
+ as claim:
+ self.assertEqual(0, claim.memory_mb)
+
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
@@ -244,17 +303,39 @@ class MissingComputeNodeTestCase(BaseTestCase):
self.assertFalse(self.tracker.disabled)
-class ResourceTestCase(BaseTestCase):
+class BaseTrackerTestCase(BaseTestCase):
+
def setUp(self):
- super(ResourceTestCase, self).setUp()
+ # setup plumbing for a working resource tracker with required
+ # database models and a compatible compute driver:
+ super(BaseTrackerTestCase, self).setUp()
+
self.tracker = self._tracker()
+ self._migrations = {}
+
self.stubs.Set(db, 'service_get_all_compute_by_host',
self._fake_service_get_all_compute_by_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
+ self.stubs.Set(db, 'migration_update',
+ self._fake_migration_update)
+ self.stubs.Set(db, 'migration_get_in_progress_by_host',
+ self._fake_migration_get_in_progress_by_host)
self.tracker.update_available_resource(self.context)
- self.limits = self._basic_limits()
+ self.limits = self._limits()
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self._assert(0, 'running_vms')
+ self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+ self.assertFalse(self.tracker.disabled)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
def _fake_service_get_all_compute_by_host(self, ctx, host):
self.compute = self._create_compute_node()
@@ -269,36 +350,50 @@ class ResourceTestCase(BaseTestCase):
self.compute.update(values)
return self.compute
- def _basic_limits(self):
- """Get basic limits, no oversubscription"""
+ def _fake_migration_get_in_progress_by_host(self, ctxt, host):
+ status = ['confirmed', 'reverted']
+ migrations = []
+
+ for migration in self._migrations.values():
+ if migration['status'] in status:
+ continue
+
+ uuid = migration['instance_uuid']
+ migration['instance'] = self._instances[uuid]
+ migrations.append(migration)
+
+ return migrations
+
+ def _fake_migration_update(self, ctxt, migration_id, values):
+ # cheat and assume there's only 1 migration present
+ migration = self._migrations.values()[0]
+ migration.update(values)
+ return migration
+
+ def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB,
+ disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS):
+ """Create limits dictionary used for oversubscribing resources"""
+
return {
- 'memory_mb': FAKE_VIRT_MEMORY_MB * 2,
- 'disk_gb': FAKE_VIRT_LOCAL_GB,
- 'vcpu': FAKE_VIRT_VCPUS,
+ 'memory_mb': memory_mb,
+ 'disk_gb': disk_gb,
+ 'vcpu': vcpus
}
- def test_update_usage_only_for_tracked(self):
- instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
- task_state=None)
- self.tracker.update_usage(self.context, instance)
+ def _assert(self, value, field, tracker=None):
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
+ if tracker is None:
+ tracker = self.tracker
- claim = self.tracker.instance_claim(self.context, instance,
- self.limits)
- self.assertNotEqual(0, claim.memory_mb)
- self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
+ if not field in tracker.compute_node:
+ raise test.TestingException(
+ "'%(field)s' not in compute node." % locals())
+ x = tracker.compute_node[field]
- # now update should actually take effect
- instance['task_state'] = task_states.SCHEDULING
- self.tracker.update_usage(self.context, instance)
+ self.assertEqual(value, x)
- self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
- self.assertEqual(1, self.tracker.compute_node['current_workload'])
+
+class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
@@ -314,13 +409,33 @@ class ResourceTestCase(BaseTestCase):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
- def test_claim_and_audit(self):
- self.assertEqual(5, self.tracker.compute_node['memory_mb'])
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(6, self.tracker.compute_node['local_gb'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+class InstanceClaimTestCase(BaseTrackerTestCase):
+
+ def test_update_usage_only_for_tracked(self):
+ instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
+ task_state=None)
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'current_workload')
+ claim = self.tracker.instance_claim(self.context, instance,
+ self.limits)
+ self.assertNotEqual(0, claim.memory_mb)
+ self._assert(3, 'memory_mb_used')
+ self._assert(2, 'local_gb_used')
+
+ # now update should actually take effect
+ instance['task_state'] = task_states.SCHEDULING
+ self.tracker.update_usage(self.context, instance)
+
+ self._assert(3, 'memory_mb_used')
+ self._assert(2, 'local_gb_used')
+ self._assert(1, 'current_workload')
+
+ def test_claim_and_audit(self):
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
@@ -354,12 +469,6 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
def test_claim_and_abort(self):
- self.assertEqual(5, self.tracker.compute_node['memory_mb'])
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
-
- self.assertEqual(6, self.tracker.compute_node['local_gb'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
-
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
@@ -368,21 +477,17 @@ class ResourceTestCase(BaseTestCase):
self.limits)
self.assertNotEqual(None, claim)
- self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
- self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
claim.abort()
- self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(5, self.compute["free_ram_mb"])
- self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(6, self.compute["free_disk_gb"])
@@ -450,8 +555,6 @@ class ResourceTestCase(BaseTestCase):
self.assertEqual(2, self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
- self.assertFalse(self.tracker.disabled)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
@@ -493,3 +596,206 @@ class ResourceTestCase(BaseTestCase):
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
+
+
+class ResizeClaimTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+
+ self.stubs.Set(db, 'migration_create', self._fake_migration_create)
+
+ self.instance = self._fake_instance()
+ self.instance_type = self._fake_instance_type_create()
+
+ def _fake_migration_create(self, context, values=None):
+ instance_uuid = str(uuid.uuid1())
+ migration = {
+ 'id': 1,
+ 'source_compute': 'host1',
+ 'dest_compute': 'host2',
+ 'dest_host': '127.0.0.1',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'instance_uuid': instance_uuid,
+ 'status': 'pre-migrating',
+ 'updated_at': timeutils.utcnow()
+ }
+ if values:
+ migration.update(values)
+
+ self._migrations[instance_uuid] = migration
+ return migration
+
+ def test_claim(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ def test_abort(self):
+ try:
+ with self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits):
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ def test_additive_claims(self):
+
+ limits = self._limits(FAKE_VIRT_MEMORY_MB * 2, FAKE_VIRT_LOCAL_GB * 2,
+ FAKE_VIRT_VCPUS * 2)
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, limits)
+ instance2 = self._fake_instance()
+ self.tracker.resize_claim(self.context, instance2, self.instance_type,
+ limits)
+
+ self._assert(2 * FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_claim_and_audit(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_same_host(self):
+ self.limits['vcpu'] = 3
+
+ src_type = self._fake_instance_type_create(id=2, memory_mb=1,
+ root_gb=1, ephemeral_gb=0, vcpus=1)
+ dest_type = self._fake_instance_type_create(id=2, memory_mb=2,
+ root_gb=2, ephemeral_gb=1, vcpus=2)
+
+ # make an instance of src_type:
+ instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0,
+ vcpus=1, instance_type_id=2)
+
+ self.tracker.instance_claim(self.context, instance, self.limits)
+
+ # resize to dest_type:
+ claim = self.tracker.resize_claim(self.context, self.instance,
+ dest_type, self.limits)
+
+ self._assert(3, 'memory_mb_used')
+ self._assert(4, 'local_gb_used')
+ self._assert(3, 'vcpus_used')
+
+ self.tracker.update_available_resource(self.context)
+ claim.abort()
+
+ # only the original instance should remain, not the migration:
+ self._assert(1, 'memory_mb_used')
+ self._assert(1, 'local_gb_used')
+ self._assert(1, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ def test_revert(self):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ migration, itype = self.tracker.tracked_migrations[
+ self.instance['uuid']]
+ self.tracker.revert_resize(self.context, migration)
+
+ self.assertEqual(0, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+
+ def test_revert_reserve_source(self):
+ # if a revert has started at the API and audit runs on
+ # the source compute before the instance flips back to source,
+ # resources should still be held at the source based on the
+ # migration:
+ dest = "desthost"
+ dest_tracker = self._tracker(host=dest)
+ dest_tracker.update_available_resource(self.context)
+
+ self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
+ root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
+ vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
+
+ values = {'source_compute': self.host, 'dest_compute': dest,
+ 'old_instance_type_id': 1, 'new_instance_type_id': 1,
+ 'status': 'post-migrating',
+ 'instance_uuid': self.instance['uuid']}
+ migration = self._fake_migration_create(self.context, values)
+
+ # attach an instance to the destination host tracker:
+ dest_tracker.instance_claim(self.context, self.instance)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # audit and recheck to confirm migration doesn't get double counted
+ # on dest:
+ dest_tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # apply the migration to the source host tracker:
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ # flag the instance and migration as reverting and re-audit:
+ self.instance['vm_state'] = vm_states.RESIZED
+ self.instance['task_state'] = task_states.RESIZE_REVERTING
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_resize_filter(self):
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
+ self.assertFalse(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.RESIZED,
+ task_state=task_states.SUSPENDING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.RESIZE_MIGRATING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ def test_dupe_filter(self):
+ self._fake_instance_type_create(id=2, memory_mb=1, root_gb=1,
+ ephemeral_gb=1, vcpus=1)
+
+ instance = self._fake_instance(host=self.host)
+
+ values = {'source_compute': self.host, 'dest_compute': self.host,
+ 'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
+ self._fake_migration_create(self.context, values)
+ self._fake_migration_create(self.context, values)
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 1edfa771f..fac95cc62 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -19,15 +19,14 @@ Unit Tests for nova.compute.rpcapi
"""
from nova.compute import rpcapi as compute_rpcapi
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ComputeRpcAPITestCase(test.TestCase):
@@ -74,7 +73,7 @@ class ComputeRpcAPITestCase(test.TestCase):
host = kwargs['destination']
else:
host = kwargs['instance']['host']
- expected_topic = '%s.%s' % (FLAGS.compute_topic, host)
+ expected_topic = '%s.%s' % (CONF.compute_topic, host)
self.fake_args = None
self.fake_kwargs = None
@@ -174,6 +173,9 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('host_power_action', 'call', action='action',
host='host')
+ def test_get_backdoor_port(self):
+ self._test_compute_api('get_backdoor_port', 'call', host='host')
+
def test_inject_file(self):
self._test_compute_api('inject_file', 'cast',
instance=self.fake_instance, path='path', file_contents='fc')
@@ -264,8 +266,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
- aggregate_id='id', host_param='host', host='host',
- slave_info={}, version='2.2')
+ aggregate={'id': 'fake_id'}, host_param='host', host='host',
+ slave_info={}, version='2.15')
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
@@ -286,7 +288,8 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_resize_instance(self):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance, migration={'id': 'fake_id'},
- image='image', reservations=list('fake_res'), version='2.6')
+ image='image', instance_type={'id': 1},
+ reservations=list('fake_res'), version='2.16')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
diff --git a/nova/tests/baremetal/__init__.py b/nova/tests/conductor/__init__.py
index e69de29bb..e69de29bb 100644
--- a/nova/tests/baremetal/__init__.py
+++ b/nova/tests/conductor/__init__.py
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
new file mode 100644
index 000000000..fbf7d0325
--- /dev/null
+++ b/nova/tests/conductor/test_conductor.py
@@ -0,0 +1,133 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the conductor service"""
+
+from nova.compute import instance_types
+from nova.compute import vm_states
+from nova import conductor
+from nova.conductor import api as conductor_api
+from nova.conductor import manager as conductor_manager
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import db
+from nova import notifications
+from nova import test
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+
+class BaseTestCase(test.TestCase):
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny'):
+ if not params:
+ params = {}
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ type_id = instance_types.get_instance_type_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = 'x86_64'
+ inst['os_type'] = 'Linux'
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+
+class ConductorTestCase(BaseTestCase):
+ """Conductor Manager Tests"""
+ def setUp(self):
+ super(ConductorTestCase, self).setUp()
+ self.conductor = conductor_manager.ConductorManager()
+ self.db = None
+
+ def _do_update(self, instance_uuid, **updates):
+ return self.conductor.instance_update(self.context, instance_uuid,
+ updates)
+
+ def test_instance_update(self):
+ instance = self._create_fake_instance()
+ new_inst = self._do_update(instance['uuid'],
+ vm_state=vm_states.STOPPED)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+ self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+
+ def test_instance_update_invalid_key(self):
+ # NOTE(danms): the real DB API call ignores invalid keys
+ if self.db == None:
+ self.assertRaises(KeyError,
+ self._do_update, 'any-uuid', foobar=1)
+
+
+class ConductorRPCAPITestCase(ConductorTestCase):
+ """Conductor RPC API Tests"""
+ def setUp(self):
+ super(ConductorRPCAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_rpcapi.ConductorAPI()
+
+
+class ConductorLocalAPITestCase(ConductorTestCase):
+ """Conductor LocalAPI Tests"""
+ def setUp(self):
+ super(ConductorLocalAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalAPI()
+ self.db = db
+
+ def _do_update(self, instance_uuid, **updates):
+ # NOTE(danms): the public API takes actual keyword arguments,
+ # so override the base class here to make the call correctly
+ return self.conductor.instance_update(self.context, instance_uuid,
+ **updates)
+
+
+class ConductorAPITestCase(ConductorLocalAPITestCase):
+ """Conductor API Tests"""
+ def setUp(self):
+ super(ConductorAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.API()
+ self.db = None
+
+
+class ConductorImportTest(test.TestCase):
+ def test_import_conductor_local(self):
+ self.flags(use_local=True, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.LocalAPI))
+
+ def test_import_conductor_rpc(self):
+ self.flags(use_local=False, group='conductor')
+ self.assertTrue(isinstance(conductor.API(),
+ conductor_api.API))
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 2b50edf05..372fbd1df 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -24,12 +24,10 @@ from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import rpc
from nova import test
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('console_driver', 'nova.console.manager')
@@ -40,7 +38,7 @@ class ConsoleTestCase(test.TestCase):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
- self.console = importutils.import_object(FLAGS.console_manager)
+ self.console = importutils.import_object(CONF.console_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
diff --git a/nova/tests/console/test_rpcapi.py b/nova/tests/console/test_rpcapi.py
index 9499002c6..0df7491c5 100644
--- a/nova/tests/console/test_rpcapi.py
+++ b/nova/tests/console/test_rpcapi.py
@@ -18,14 +18,13 @@
Unit Tests for nova.console.rpcapi
"""
+from nova import config
from nova.console import rpcapi as console_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ConsoleRpcAPITestCase(test.TestCase):
@@ -49,7 +48,7 @@ class ConsoleRpcAPITestCase(test.TestCase):
getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(self.cast_ctxt, ctxt)
- self.assertEqual(self.cast_topic, FLAGS.console_topic)
+ self.assertEqual(self.cast_topic, CONF.console_topic)
self.assertEqual(self.cast_msg, expected_msg)
def test_add_console(self):
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index da50eb83b..5ff15b23b 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -22,14 +22,12 @@ Tests for Consoleauth Code.
import time
+from nova import config
from nova.consoleauth import manager
from nova import context
-from nova import flags
from nova.openstack.common import log as logging
from nova import test
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index c59e322b8..428a79e3a 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -18,14 +18,13 @@
Unit Tests for nova.consoleauth.rpcapi
"""
+from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ConsoleAuthRpcAPITestCase(test.TestCase):
@@ -54,7 +53,7 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
self.assertEqual(retval, expected_retval)
self.assertEqual(self.call_ctxt, ctxt)
- self.assertEqual(self.call_topic, FLAGS.consoleauth_topic)
+ self.assertEqual(self.call_topic, CONF.consoleauth_topic)
self.assertEqual(self.call_msg, expected_msg)
self.assertEqual(self.call_timeout, None)
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index a78fd2e12..653edf58a 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -68,7 +68,6 @@ def stub_out_db_network_api(stubs):
fixed_ip_fields = {'id': 0,
'network_id': 0,
- 'network': FakeModel(network_fields),
'address': '192.168.0.100',
'instance': False,
'instance_id': 0,
@@ -208,15 +207,6 @@ def stub_out_db_network_api(stubs):
if ips:
return FakeModel(ips[0])
- def fake_fixed_ip_get_network(context, address):
- ips = filter(lambda i: i['address'] == address,
- fixed_ips)
- if ips:
- nets = filter(lambda n: n['id'] == ips[0]['network_id'],
- networks)
- if nets:
- return FakeModel(nets[0])
-
def fake_fixed_ip_update(context, address, values):
ips = filter(lambda i: i['address'] == address,
fixed_ips)
@@ -318,7 +308,6 @@ def stub_out_db_network_api(stubs):
fake_fixed_ip_disassociate_all_by_timeout,
fake_fixed_ip_get_by_instance,
fake_fixed_ip_get_by_address,
- fake_fixed_ip_get_network,
fake_fixed_ip_update,
fake_instance_type_get,
fake_virtual_interface_create,
diff --git a/nova/tests/declare_flags.py b/nova/tests/declare_flags.py
index 9e8fee123..615ee35b3 100644
--- a/nova/tests/declare_flags.py
+++ b/nova/tests/declare_flags.py
@@ -16,8 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.IntOpt('answer', default=42, help='test flag'))
+CONF = config.CONF
+CONF.register_opt(cfg.IntOpt('answer', default=42, help='test flag'))
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index f8661e434..65191641d 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -17,13 +17,11 @@
# under the License.
from nova import config
-from nova import flags
CONF = config.CONF
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
-CONF.import_opt('iscsi_num_targets', 'nova.volume.driver')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('policy_file', 'nova.policy')
@@ -35,7 +33,6 @@ def set_defaults(conf):
conf.set_default('fake_network', True)
conf.set_default('fake_rabbit', True)
conf.set_default('flat_network_bridge', 'br100')
- conf.set_default('iscsi_num_targets', 8)
conf.set_default('network_size', 8)
conf.set_default('num_networks', 2)
conf.set_default('vlan_interface', 'eth0')
diff --git a/nova/tests/fake_loadables/__init__.py b/nova/tests/fake_loadables/__init__.py
new file mode 100644
index 000000000..824243347
--- /dev/null
+++ b/nova/tests/fake_loadables/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2012 OpenStack LLC.  All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Loadable class handling.
+"""
+
+from nova import loadables
+
+
+class FakeLoadable(object):
+ pass
+
+
+class FakeLoader(loadables.BaseLoader):
+ def __init__(self):
+ super(FakeLoader, self).__init__(FakeLoadable)
diff --git a/nova/tests/fake_loadables/fake_loadable1.py b/nova/tests/fake_loadables/fake_loadable1.py
new file mode 100644
index 000000000..58f9704b3
--- /dev/null
+++ b/nova/tests/fake_loadables/fake_loadable1.py
@@ -0,0 +1,44 @@
+# Copyright 2012 OpenStack LLC.  All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #1
+"""
+
+from nova.tests import fake_loadables
+
+
+class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass4(object):
+ """Not a correct subclass."""
+
+
+def return_valid_classes():
+ return [FakeLoadableSubClass1, FakeLoadableSubClass2]
+
+
+def return_invalid_classes():
+ return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
+ FakeLoadableSubClass4]
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py b/nova/tests/fake_loadables/fake_loadable2.py
index 5e24b42c3..3e365effc 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
+++ b/nova/tests/fake_loadables/fake_loadable2.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 IBM
+# Copyright 2012 OpenStack LLC.  All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,21 +11,29 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Fake Loadable subclasses module #2
+"""
+
+from nova.tests import fake_loadables
+
+
+class FakeLoadableSubClass5(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass6(fake_loadables.FakeLoadable):
+ pass
+
-from sqlalchemy import Index, MetaData, Table
+class _FakeLoadableSubClass7(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('fixed_ips', meta, autoload=True)
- index = Index('address', instances.c.address)
- index.create(migrate_engine)
+class FakeLoadableSubClass8(BaseException):
+ """Not a correct subclass."""
-def downgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- instances = Table('fixed_ips', meta, autoload=True)
- index = Index('address', instances.c.address)
- index.drop(migrate_engine)
+def return_valid_class():
+ return [FakeLoadableSubClass6]
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index ff22278c5..50c6a55a8 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -17,10 +17,10 @@
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
+from nova import config
import nova.context
from nova import db
from nova import exception
-from nova import flags
from nova.network import api as network_api
from nova.network import manager as network_manager
from nova.network import model as network_model
@@ -30,7 +30,7 @@ from nova.virt.libvirt import config as libvirt_config
HOST = "testhost"
-FLAGS = flags.FLAGS
+CONF = config.CONF
class FakeIptablesFirewallDriver(object):
@@ -158,7 +158,7 @@ class FakeNetworkManager(network_manager.NetworkManager):
def fake_network(network_id, ipv6=None):
if ipv6 is None:
- ipv6 = FLAGS.use_ipv6
+ ipv6 = CONF.use_ipv6
fake_network = {'id': network_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
'label': 'test%d' % network_id,
@@ -185,7 +185,7 @@ def fake_network(network_id, ipv6=None):
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
fake_network['injected'] = True
return fake_network
@@ -433,7 +433,7 @@ def _get_fake_cache():
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3')]}]}}]
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 54fd85fe5..f490b6705 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -14,10 +14,12 @@
"""Implementation of a fake volume API"""
+import uuid
+
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-from nova import utils
+
LOG = logging.getLogger(__name__)
@@ -34,7 +36,7 @@ class fake_volume():
if snapshot is not None:
snapshot_id = snapshot['id']
if volume_id is None:
- volume_id = str(utils.gen_uuid())
+ volume_id = str(uuid.uuid4())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
@@ -79,7 +81,7 @@ class fake_snapshot():
def __init__(self, volume_id, size, name, desc, id=None):
if id is None:
- id = str(utils.gen_uuid())
+ id = str(uuid.uuid4())
self.snap = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
diff --git a/nova/tests/hyperv/README.rst b/nova/tests/hyperv/README.rst
new file mode 100644
index 000000000..c7ba16046
--- /dev/null
+++ b/nova/tests/hyperv/README.rst
@@ -0,0 +1,83 @@
+===========================================
+OpenStack Hyper-V Nova Testing Architecture
+===========================================
+
+The Hyper-V Nova Compute plugin uses Windows Management Instrumentation (WMI)
+as the main API for hypervisor related operations.
+WMI has a database / procedural oriented nature that can become difficult to
+test with a traditional static mock / stub based unit testing approach.
+
+The included Hyper-V testing framework has been developed with the
+following goals:
+
+1) Dynamic mock generation.
+2) Decoupling. No dependencies on WMI or any other module.
+ The tests are designed to work with mocked objects in all cases, including
+ OS-dependent (e.g. wmi, os, subprocess) and non-deterministic
+ (e.g. time, uuid) modules
+3) Transparency. Mocks and real objects can be swapped via DI
+ or monkey patching.
+4) Platform independence.
+5) Tests need to be executed against the real object or against the mocks
+ with a simple configuration switch. Development efforts can highly
+ benefit from this feature.
+6) It must be possible to change a mock's behavior without running the tests
+ against the hypervisor (e.g. by manually adding a value / return value).
+
+The tests included in this package include dynamically generated mock objects,
+based on the recording of the attribute values and invocations on the
+real WMI objects and other OS dependent features.
+The generated mock objects are serialized in the nova/tests/hyperv/stubs
+directory as gzipped pickled objects.
+
+An environment variable controls the execution mode of the tests.
+
+Recording mode:
+
+NOVA_GENERATE_TEST_MOCKS=True
+Tests are executed on the hypervisor (without mocks), and mock objects are
+generated.
+
+Replay mode:
+
+NOVA_GENERATE_TEST_MOCKS=
+Tests are executed with the existing mock objects (default).
+
+Mock generation is performed by nova.tests.hyperv.mockproxy.MockProxy.
+Instances of this class wrap objects that need to be mocked and act as a
+delegate on the wrapped object by leveraging Python's __getattr__ feature.
+Attribute values and method call return values are recorded at each access.
+Objects returned by attributes and method invocations are wrapped in a
+MockProxy consistently.
+From a caller perspective, the MockProxy is completely transparent,
+with the exception of calls to the type(...) builtin function.
+
+At the end of the test, a mock is generated by each MockProxy by calling
+the get_mock() method. A mock is represented by an instance of the
+nova.tests.hyperv.mockproxy.Mock class.
+
+The Mock class's task consists of replicating the behaviour of the mocked
+objects / modules by returning the same values in the same order, for example:
+
+def check_path(path):
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+check_path(path)
+# The second time os.path.exists returns True
+check_path(path)
+
+The injection of MockProxy / Mock instances is performed by the
+nova.tests.hyperv.basetestcase.BaseTestCase class in the setUp()
+method via selective monkey patching.
+Mocks are serialized in tearDown() during recording.
+
+The actual Hyper-V test case inherits from BaseTestCase:
+nova.tests.hyperv.test_hypervapi.HyperVAPITestCase
+
+
+Future directions:
+
+1) Replace the pickled files with a more generic serialization option (e.g. json)
+2) Add methods to statically extend the mocks (e.g. method call return values)
+3) Extend an existing framework, e.g. mox
diff --git a/nova/tests/hyperv/basetestcase.py b/nova/tests/hyperv/basetestcase.py
index 318cf2e28..4458dbd9d 100644
--- a/nova/tests/hyperv/basetestcase.py
+++ b/nova/tests/hyperv/basetestcase.py
@@ -21,6 +21,7 @@ TestCase for MockProxy based tests and related classes.
import gzip
import os
import pickle
+import sys
from nova import test
from nova.tests.hyperv import mockproxy
@@ -77,7 +78,8 @@ class BaseTestCase(test.TestCase):
not in ['true', 'yes', '1']:
m = self._load_mock(module_name)
else:
- module = __import__(module_name)
+ __import__(module_name)
+ module = sys.modules[module_name]
m = mockproxy.MockProxy(module)
self._mps[module_name] = m
return m
diff --git a/nova/tests/hyperv/db_fakes.py b/nova/tests/hyperv/db_fakes.py
index 9f5572fd1..16d894df8 100644
--- a/nova/tests/hyperv/db_fakes.py
+++ b/nova/tests/hyperv/db_fakes.py
@@ -19,6 +19,7 @@ Stubouts, mocks and fixtures for the test suite
"""
import time
+import uuid
from nova.compute import task_states
from nova.compute import vm_states
@@ -29,14 +30,20 @@ from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
- 'uuid': utils.gen_uuid(),
+ 'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
- 'instance_type': 'm1.tiny',
+ 'instance_type':
+ {'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 0,
+ 'flavorid': 1,
+ 'rxtx_factor': 1}
}
@@ -104,14 +111,20 @@ def stub_out_db_instance_api(stubs):
def __init__(self, values):
self.values = values
+ def get(self, key, default=None):
+ if key in self.values:
+ return self.values[key]
+ else:
+ return default
+
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
- if key in self.values:
- return self.values[key]
- else:
- raise NotImplementedError()
+ return self.get(key)
+
+ def __str__(self):
+ return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
@@ -119,12 +132,12 @@ def stub_out_db_instance_api(stubs):
if 'instance_type' not in values:
return
- type_data = INSTANCE_TYPES[values['instance_type']]
+ instance_type = values['instance_type']
base_options = {
'name': values['name'],
'id': values['id'],
- 'uuid': utils.gen_uuid(),
+ 'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
@@ -134,11 +147,11 @@ def stub_out_db_instance_api(stubs):
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
- 'instance_type': values['instance_type'],
- 'memory_mb': type_data['memory_mb'],
- 'vcpus': type_data['vcpus'],
+ 'instance_type': instance_type,
+ 'memory_mb': instance_type['memory_mb'],
+ 'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
- 'root_gb': type_data['root_gb'],
+ 'root_gb': instance_type['root_gb'],
}
return FakeModel(base_options)
@@ -160,7 +173,12 @@ def stub_out_db_instance_api(stubs):
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
+ def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
+ return {}
+
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
+ stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
index 7cf9f32fe..65207d9f5 100644
--- a/nova/tests/hyperv/hypervutils.py
+++ b/nova/tests/hyperv/hypervutils.py
@@ -165,7 +165,13 @@ class HyperVUtils(object):
drive_path = hostResources[0]
volume_drives.append(drive_path)
- return (disk_files, volume_drives)
+ dvds = [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Virtual CD/DVD Disk']
+ dvd_files = []
+ for dvd in dvds:
+ dvd_files.extend([c for c in dvd.Connection])
+
+ return (disk_files, volume_drives, dvd_files)
def remove_remote_vm(self, server, vm_name):
conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
@@ -181,7 +187,8 @@ class HyperVUtils(object):
#Stop the VM first.
self._set_vm_state(conn, vm_name, 3)
- (disk_files, volume_drives) = self._get_vm_disks(conn, vm_name)
+ (disk_files, volume_drives, dvd_files) = self._get_vm_disks(conn,
+ vm_name)
(job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
if ret_val == constants.WMI_JOB_STATUS_STARTED:
@@ -192,7 +199,7 @@ class HyperVUtils(object):
raise Exception(_('Failed to destroy vm %s') % vm_name)
#Delete associated vhd disk files.
- for disk in disk_files:
+ for disk in disk_files + dvd_files:
vhd_file = conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
disk.replace("'", "''") + "'")[0]
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
index ff04ea709..1aeaf44f8 100644
--- a/nova/tests/hyperv/mockproxy.py
+++ b/nova/tests/hyperv/mockproxy.py
@@ -18,6 +18,7 @@ Classes for dynamic generation of mock objects.
"""
import inspect
+import pickle
def serialize_obj(obj):
@@ -39,7 +40,12 @@ def serialize_obj(obj):
l1 = l1 + (serialize_obj(i1),)
val = str(l1)
else:
- val = str(obj)
+ if isinstance(obj, str) or isinstance(obj, unicode):
+ val = obj
+ elif hasattr(obj, '__str__') and inspect.ismethod(obj.__str__):
+ val = str(obj)
+ else:
+ val = str(type(obj))
return val
@@ -48,6 +54,11 @@ def serialize_args(*args, **kwargs):
return serialize_obj((args, kwargs))
+class MockException(Exception):
+ def __init__(self, message):
+ super(MockException, self).__init__(message)
+
+
class Mock(object):
def _get_next_value(self, name):
c = self._access_count.get(name)
@@ -56,7 +67,13 @@ class Mock(object):
else:
c = c + 1
self._access_count[name] = c
- return self._values[name][c]
+
+ try:
+ value = self._values[name][c]
+ except IndexError as ex:
+ raise MockException(_('Couldn\'t find invocation num. %(c)d '
+ 'of attribute "%(name)s"') % locals())
+ return value
def _get_next_ret_value(self, name, params):
d = self._access_count.get(name)
@@ -69,7 +86,23 @@ class Mock(object):
else:
c = c + 1
d[params] = c
- return self._values[name][params][c]
+
+ try:
+ m = self._values[name]
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%s"') % (name))
+
+ try:
+ value = m[params][c]
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%(name)s" '
+ 'with arguments "%(params)s"') % locals())
+ except IndexError as ex:
+ raise MockException(_('Couldn\'t find invocation num. %(c)d '
+ 'of attribute "%(name)s" with arguments "%(params)s"')
+ % locals())
+
+ return value
def __init__(self, values):
self._values = values
@@ -82,7 +115,13 @@ class Mock(object):
if name.startswith('__') and name.endswith('__'):
return object.__getattribute__(self, name)
else:
- if isinstance(self._values[name], dict):
+ try:
+ isdict = isinstance(self._values[name], dict)
+ except KeyError as ex:
+ raise MockException(_('Couldn\'t find attribute "%s"')
+ % (name))
+
+ if isdict:
def newfunc(*args, **kwargs):
params = serialize_args(args, kwargs)
return self._get_next_ret_value(name, params)
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
new file mode 100644
index 000000000..4b4a0e83b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..b3c789fab
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
index 009a2d45d..6c91bfc7d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
index cb7818abb..1e573655f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
index d4005b336..6a692b3d8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
index 041d7314a..bf6c99e6d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
index cab9cd580..ecdf50fdd 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
new file mode 100644
index 000000000..aca0d6f0c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..bbeec53df
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
index 0dfe439ca..3bf9bd13a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
index 17f83545d..62e3fa329 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
index 1ecf26961..36970348a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
index 1c68ad11e..8db997abf 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
index 7d4bae7a9..73f90ac2b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
index c1d101887..3ae9a6f46 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
index 2f30402a9..9ab4f7c75 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
new file mode 100644
index 000000000..5887bd8cb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..3b18b3984
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
index 578b33da7..72e7c7c83 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
index 1da1b4dd0..3b17cc74f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
index 67798704f..0f6be50bc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
index 54585f18c..168353e36 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
index 61ca098cb..58a3b58cd 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
new file mode 100644
index 000000000..41076dc75
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..316e38545
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
index 5f5a6c383..6249c2d1a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
index 61c59ea1f..c1c8e5e69 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
index 91252758c..c79054da2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
index b06fd1371..51b9b8bac 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
index c6e9722c2..004044ba2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
new file mode 100644
index 000000000..538aa7195
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..704842f81
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
index 809332508..40ef2d2c0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
index d4b9d8921..295234835 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
index c6124e1e0..a151a99b4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
index 7b7110e06..3c8faa586 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
index 6c254032c..d595ed0ca 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
index 595510cff..d15fbd645 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
index 3f50a76e0..dd8eb1248 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
new file mode 100644
index 000000000..27611287c
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
index 35126ad4b..47e882ab0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
index 1a34569d1..3b91f7e0a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
new file mode 100644
index 000000000..cf6c9a79a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
index 805931bbb..882c26536 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
index e9852d038..4c58efecf 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
new file mode 100644
index 000000000..7e8eaf1b5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..a3a780940
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
index a292ad56e..afe233bd2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
index bc29985bd..86e3beba7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
index 21812b0fa..413f1abda 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
index 13f51b856..7d4c15fa6 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
index fca5d9328..9d50ab3fd 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
index b082714cd..3e93e6a91 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
index 103f00b84..f091ff6f7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
new file mode 100644
index 000000000..6de95eb6b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..19bdf4eeb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
index 3ab274719..0c1b16e8c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
index 9d89a627d..edad68afb 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
index 2c6fa5a22..1f0e2910b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
index 9a54bbb62..4c92ce498 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
new file mode 100644
index 000000000..7052c4ae8
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..682897b93
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
index 0b6aff86d..d86dfeff9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
index 51331083e..cb9b24e10 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
index fb5e35662..f069f645e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
index d8c75ba3c..ef03cd677 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
index 92bbed9da..75047b386 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
index bb4535336..571060079 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
new file mode 100644
index 000000000..de596bb42
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..0b431f2ed
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
index b2af3e865..a02d8ab81 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
index 293c9b3cf..0499ae451 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
index b43ba2897..83d492f49 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
index a1b757b60..5cbe40932 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
index f988eca93..7257ce53d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
new file mode 100644
index 000000000..a43a22433
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..bb8679675
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
index 4d53ded9b..5a0928c55 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
index 42e3d8f98..90382ae1e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
index e7728c515..74ee76c84 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
index a970cc189..19797fdd4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
index 6b3414f25..b4e3ed693 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
new file mode 100644
index 000000000..978b8a65d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..6ca0136df
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
index 11910aa8f..c45f95b08 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
index a128ac167..fe6edc718 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
index b56c849ea..60684faaf 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
index adf7b4648..8a9d6aa54 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
index 907cf2e25..4d9ea2803 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
new file mode 100644
index 000000000..97f4cc53b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..ef7664f08
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
index 81877dd6e..d77989fda 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
index 33a72e90e..cb894988e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
index ff56a9287..f6f7c2e39 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
index 682dd6d40..1f6dfde64 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
index fba91bfff..9a68d6cc4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..a98f4ae22
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..915459d5e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
index 1578751ee..f687f6295 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
index 987eeb6da..fc555da69 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
index 27495c884..43da7d9d1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
index 80d62a9a4..7e76d1734 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
index 1ad1d60dc..2b6cd0afc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
new file mode 100644
index 000000000..5a132a3a3
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..d5abaa4d5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
index 3855ac0dd..783d747f9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
index 8f1d273f2..ff23ded0f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
index 927204978..342a61054 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
index 849fd1c8c..10b1b5c7b 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
index 41aa8ccfb..5443c3ac4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..456af2816
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
index e69a69a20..ca1103d60 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
index d5aa712ac..f9ed1b830 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
index db090ad4d..3a207f066 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
index ae76e5693..715d86d61 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
index 8e4e9bd65..c2e54a1df 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..f58f80a79
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
index 991858501..a0dc11cc5 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
index f4a514e5c..f48621c25 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
index 3916fc0fb..e41277cac 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
new file mode 100644
index 000000000..dca595c41
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..99868059d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
index de1f831de..251cadc51 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
index 751668b6f..d7a6b8111 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
index 922fce900..dd6975095 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
index c79c72334..1bdaf90c0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
index 3cedfe1ba..0190de402 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..e1099b1d9
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..a375a9a21
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
index 626398469..7551892ff 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
index 15a83ac0c..30ac616da 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
index 755cf2e08..db6219f30 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
index d14db9b2f..47d4218d4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
index 679287e3a..14a3977e8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
new file mode 100644
index 000000000..ab0dc1f7d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..0496337af
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
index ed654b90e..f1661f679 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
index 5b7ff554d..8a73b5c56 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
index d89b52377..d7cf111e9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
index 764e6c45e..477ddc99f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
index a63f4881a..9105c0b05 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
new file mode 100644
index 000000000..03253379b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..e08050979
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
index 607047b38..e19457a43 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
index 4f8b93282..93beb48a1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
index 429a96d7e..41f19fa85 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
index ac9c25734..7cfbbe1e8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
new file mode 100644
index 000000000..219649aca
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..82ee0bfe5
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
index 82b3a6185..c32610c41 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
index 741f28905..e87fc1e9f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
index 5c633dc73..f2db1ffe9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
index da8c02d81..f8ea9d024 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
index 9e0baf1cd..9f164feed 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
index f647f9516..6a04f478a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
new file mode 100644
index 000000000..8556954fb
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..0522732b0
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
new file mode 100644
index 000000000..549826d97
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
new file mode 100644
index 000000000..5897fb881
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
new file mode 100644
index 000000000..88d6dd801
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
new file mode 100644
index 000000000..92c094f83
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
new file mode 100644
index 000000000..11d119734
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
new file mode 100644
index 000000000..de22de40b
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..e71ed2f00
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
new file mode 100644
index 000000000..43f0bb97a
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
new file mode 100644
index 000000000..3c8490bbf
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
new file mode 100644
index 000000000..1df9f6c17
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
new file mode 100644
index 000000000..efa732cdc
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
new file mode 100644
index 000000000..ccbf97ec9
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..ac8216084
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..adc790897
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
index cd1356e9e..8d110a5e8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
index 8add1aafc..5e8920af7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
index c889f9472..8642d8fe1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
index 20a8cad07..1dc8915e0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
index 9fec601ab..7c12440a0 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
new file mode 100644
index 000000000..2cb9335f7
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..bca74208e
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
new file mode 100644
index 000000000..7e4ee2ac1
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
new file mode 100644
index 000000000..f9fd44f3f
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
new file mode 100644
index 000000000..27973f8ae
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
new file mode 100644
index 000000000..dfd674c80
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
new file mode 100644
index 000000000..1607d2fcf
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
new file mode 100644
index 000000000..442614b77
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..4f3666db4
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
index 4587a6fda..eeed06c23 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
index 48cb908c1..d860826c4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
index 0d15a012e..397114c1d 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
index b0b49c932..33e0db7c2 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
index 574ce071e..cab8a254e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
new file mode 100644
index 000000000..428e2a44d
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
index c19b6e25e..2f63ff85e 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
index 1d655bb02..6c17e23a9 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
index 678b4cd10..7d2b7d6b8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
index 0884a350b..02aca2da4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
index 128b20ac5..526c23b72 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
new file mode 100644
index 000000000..d4c1f05e4
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..5ae976489
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
index bc4d4b99d..b6f94a692 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
index 8de7c4e71..75f2c62fd 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
index ee94dd6c2..7c7d2338f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
index 313bcfa06..ff432b874 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
index de8064431..4bb9cd398 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
new file mode 100644
index 000000000..e64ca9fe6
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..7588241e2
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
index e852140a1..fe4595ab7 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
index f89c63faf..b64e79521 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
index 12cda7550..f1a900c69 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
index 07445af3e..a0eae7afe 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
index 8e21428f2..d0818164c 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
new file mode 100644
index 000000000..9c5d6a314
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..a3845fcb6
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
index 794d9a09d..f12e09fa1 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
index 775f8232c..7e8190f0a 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
index d0c0306f2..83e301a63 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
index 3cb6c4b7f..47ec9bbef 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
index a48d4aa9b..490409abc 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
new file mode 100644
index 000000000..a9343f436
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
new file mode 100644
index 000000000..051467f97
--- /dev/null
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
index 5578f64f8..56a260f62 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
index 224ba464f..e0c5dd59f 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
index 29c15fe82..7e5a514a8 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
index 9ac16ec7d..00636f0c4 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
index d6244c3fc..910545998 100644
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
+++ b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
Binary files differ
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index fa5e770f8..58c03b167 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -20,20 +20,18 @@
import copy
import datetime
+import uuid
+from nova import config
from nova import exception
-from nova import flags
import nova.image.glance
from nova.openstack.common import log as logging
-from nova import utils
+CONF = config.CONF
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
-
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
@@ -53,8 +51,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
@@ -67,8 +65,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
@@ -80,8 +78,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': None,
'disk_format': None,
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
@@ -93,8 +91,8 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}}
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel}}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
@@ -120,8 +118,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
@@ -135,8 +133,8 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
- 'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel,
+ 'properties': {'kernel_id': CONF.null_kernel,
+ 'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
@@ -178,7 +176,7 @@ class _FakeImageService(object):
:raises: Duplicate if the image already exist.
"""
- image_id = str(metadata.get('id', utils.gen_uuid()))
+ image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 13e090cef..e8baf4353 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -28,6 +28,7 @@ from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
+from nova.tests import matchers
class NullWriter(object):
@@ -155,10 +156,10 @@ class TestGlanceImageService(test.TestCase):
'properties': {'instance_id': '42', 'user_id': 'fake'},
'owner': None,
}
- self.assertDictMatch(image_meta, expected)
+ self.assertThat(image_meta, matchers.DictMatches(expected))
image_metas = self.service.detail(self.context)
- self.assertDictMatch(image_metas[0], expected)
+ self.assertThat(image_metas[0], matchers.DictMatches(expected))
def test_create_without_instance_id(self):
"""
@@ -188,7 +189,7 @@ class TestGlanceImageService(test.TestCase):
'owner': None,
}
actual = self.service.show(self.context, image_id)
- self.assertDictMatch(actual, expected)
+ self.assertThat(actual, matchers.DictMatches(expected))
def test_create(self):
fixture = self._make_fixture(name='test image')
@@ -259,7 +260,7 @@ class TestGlanceImageService(test.TestCase):
'owner': None,
}
- self.assertDictMatch(meta, expected)
+ self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_limit(self):
@@ -315,7 +316,7 @@ class TestGlanceImageService(test.TestCase):
'deleted': None,
'owner': None,
}
- self.assertDictMatch(meta, expected)
+ self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_invalid_marker(self):
diff --git a/nova/tests/integrated/api_samples/README.rst b/nova/tests/integrated/api_samples/README.rst
index b4e274334..065df1d32 100644
--- a/nova/tests/integrated/api_samples/README.rst
+++ b/nova/tests/integrated/api_samples/README.rst
@@ -5,7 +5,7 @@ Samples in this directory are automatically generated from the api samples
integration tests. To regenerate the samples, simply set GENERATE_SAMPLES
in the environment before running the tests. For example:
- GENERATE_SAMPLES=True ./run_tests.py nova.tests.integrated
+ GENERATE_SAMPLES=True tox -epy27 nova.tests.integrated
If new tests are added or the .tpl files are changed due to bug fixes, the
samples should be regenerated so they are in sync with the templates.
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 9581a0e7e..531508951 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -137,6 +137,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-fixed-ips",
+ "description": "Fixed IPs support",
+ "links": [],
+ "name": "FixedIPs",
+ "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2",
+ "updated": "2012-10-18T13:25:27-06:00"
+ },
+ {
"alias": "os-flavor-access",
"description": "%(text)s",
"links": [],
@@ -209,6 +217,22 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-services",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Services",
+ "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-fping",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Fping",
+ "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-hypervisors",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index e8246aad8..5f58cc7d7 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -51,6 +51,9 @@
<extension alias="os-deferred-delete" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
<description>%(text)s</description>
</extension>
+ <extension alias="os-fixed-ips" name="FixedIPs" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" updated="2012-10-18T13:25:27-06:00">
+ <description>Fixed IPs support</description>
+ </extension>
<extension alias="os-flavor-access" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/flavor_access/api/v2" name="FlavorAccess">
<description>%(text)s</description>
</extension>
@@ -78,6 +81,12 @@
<extension alias="os-hosts" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/hosts/api/v1.1" name="Hosts">
<description>%(text)s</description>
</extension>
+ <extension alias="os-services" name="Services" namespace="http://docs.openstack.org/compute/ext/services/api/v2" updated="%(timestamp)s">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-fping" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/fping/api/v1.1" name="Fping">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-hypervisors" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/hypervisors/api/v1.1" name="Hypervisors">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl b/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
index 376352ec4..a86d5faa2 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
@@ -10,8 +10,6 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"maxSecurityGroups": 10,
"maxSecurityGroupRules": 20
},
@@ -68,6 +66,19 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(timestamp)s",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl b/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
index 25de4734a..6f92bcee6 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<rate regex=".*changes-since.*" uri="*changes-since*">
<limit next-available="%(timestamp)s" unit="MINUTE" verb="GET" remaining="3" value="3"/>
</rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(timestamp)s" unit="HOUR" verb="GET" remaining="12" value="12"/>
+ </rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
@@ -21,11 +24,9 @@
<limit name="maxPersonalitySize" value="10240"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="maxSecurityGroups" value="10"/>
<limit name="maxTotalCores" value="20"/>
<limit name="maxTotalFloatingIps" value="10"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits>
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
new file mode 100644
index 000000000..1926119ce
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "os-getVNCConsole": {
+ "type": "novnc"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
new file mode 100644
index 000000000..00f32c6b9
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<os-getVNCConsole type="novnc" />
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
new file mode 100644
index 000000000..3cf725575
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "console": {
+ "type": "novnc",
+ "url":"%(url)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
new file mode 100644
index 000000000..d4904aa9a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<console>
+ <type>novnc</type>
+ <url>%(url)s</url>
+</console>
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
index 9c927eeef..78ddbb5af 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
@@ -12,15 +12,11 @@
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
- "maxTotalVolumeGigabytes": 1000,
- "maxTotalVolumes": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalKeyPairsUsed": 0,
"totalRAMUsed": 0,
- "totalSecurityGroupsUsed": 0,
- "totalVolumeGigabytesUsed": 0,
- "totalVolumesUsed": 0
+ "totalSecurityGroupsUsed": 0
},
"rate": [
{
@@ -75,6 +71,19 @@
],
"regex": ".*changes-since.*",
"uri": "*changes-since*"
+ },
+ {
+ "limit": [
+ {
+ "next-available": "%(timestamp)s",
+ "remaining": 12,
+ "unit": "HOUR",
+ "value": 12,
+ "verb": "GET"
+ }
+ ],
+ "regex": "^/os-fping",
+ "uri": "*/os-fping"
}
]
}
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
index a11a577d0..75526473a 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<rate regex=".*changes-since.*" uri="*changes-since*">
<limit next-available="%(timestamp)s" unit="MINUTE" verb="GET" remaining="3" value="3"/>
</rate>
+ <rate regex="^/os-fping" uri="*/os-fping">
+ <limit next-available="%(timestamp)s" unit="HOUR" verb="GET" remaining="12" value="12"/>
+ </rate>
</rates>
<absolute>
<limit name="maxServerMeta" value="128"/>
@@ -19,20 +22,16 @@
<limit name="maxPersonality" value="5"/>
<limit name="maxImageMeta" value="128"/>
<limit name="maxPersonalitySize" value="10240"/>
- <limit name="totalVolumesUsed" value="0"/>
<limit name="maxSecurityGroupRules" value="20"/>
<limit name="maxTotalKeypairs" value="100"/>
<limit name="totalCoresUsed" value="0"/>
- <limit name="maxTotalVolumes" value="10"/>
<limit name="totalRAMUsed" value="0"/>
<limit name="totalInstancesUsed" value="0"/>
<limit name="maxSecurityGroups" value="10"/>
- <limit name="totalVolumeGigabytesUsed" value="0"/>
<limit name="maxTotalCores" value="20"/>
<limit name="totalSecurityGroupsUsed" value="0"/>
<limit name="maxTotalFloatingIps" value="10"/>
<limit name="totalKeyPairsUsed" value="0"/>
- <limit name="maxTotalVolumeGigabytes" value="1000"/>
<limit name="maxTotalRAMSize" value="51200"/>
</absolute>
</limits>
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index b1b2c076e..825881137 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -21,6 +21,7 @@ Provides common functionality for integrated unit tests
import random
import string
+import uuid
import nova.image.glance
from nova.openstack.common.log import logging
@@ -29,7 +30,6 @@ from nova import test # For the flags
from nova.tests import fake_crypto
import nova.tests.image.fake
from nova.tests.integrated.api import client
-from nova import utils
LOG = logging.getLogger(__name__)
@@ -116,7 +116,7 @@ class _IntegratedTestBase(test.TestCase):
return generate_new_element(server_names, 'server')
def get_invalid_image(self):
- return str(utils.gen_uuid())
+ return str(uuid.uuid4())
def _build_minimal_create_server_request(self):
server = {}
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 3bcd737bb..933764427 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -24,9 +24,9 @@ from lxml import etree
from nova.cloudpipe.pipelib import CloudPipe
from nova.compute import api
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.network.manager import NetworkManager
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -38,7 +38,7 @@ from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -178,6 +178,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
result = str(result)
expected = expected.replace('int:', '')
expected = expected % subs
+ expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
raise NoMatch(_('Values do not match:\n'
@@ -225,7 +226,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
'(Z|(\+|-)\d{2}:\d{2}|\.\d{6})',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
- 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]',
+ 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12})',
'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
@@ -590,7 +591,7 @@ class ServersActionsJsonTest(ServersSampleBase):
'server-action-rebuild-resp')
def test_server_resize(self):
- FLAGS.allow_resize_to_same_host = True
+ CONF.allow_resize_to_same_host = True
uuid = self._post_server()
self._test_server_action(uuid, "resize",
{"id": 2,
@@ -678,7 +679,7 @@ class FlavorsExtraDataJsonTest(ApiSampleTestBase):
def _get_flags(self):
f = super(FlavorsExtraDataJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = FLAGS.osapi_compute_extension[:]
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Flavorextradata extension also needs Flavormanage to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
@@ -833,8 +834,8 @@ class FloatingIpsJsonTest(ApiSampleTestBase):
def setUp(self):
super(FloatingIpsJsonTest, self).setUp()
- pool = FLAGS.default_floating_pool
- interface = FLAGS.public_interface
+ pool = CONF.default_floating_pool
+ interface = CONF.public_interface
self.ip_pool = [
{
@@ -895,7 +896,7 @@ class FloatingIpsJsonTest(ApiSampleTestBase):
def test_floating_ips_create(self):
response = self._do_post('os-floating-ips',
'floating-ips-create-req',
- {"pool": FLAGS.default_floating_pool})
+ {"pool": CONF.default_floating_pool})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('floating-ips-create-resp',
@@ -1060,14 +1061,14 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
def test_cloud_pipe_create(self):
"""Get api samples of cloud pipe extension creation"""
- FLAGS.vpn_image_id = fake.get_valid_image_id()
+ CONF.vpn_image_id = fake.get_valid_image_id()
project = {'project_id': 'cloudpipe-' + str(uuid.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
- subs['image_id'] = FLAGS.vpn_image_id
+ subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response)
return project
@@ -1078,7 +1079,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
self.assertEqual(response.status, 200)
subs = self._get_regexes()
subs.update(project)
- subs['image_id'] = FLAGS.vpn_image_id
+ subs['image_id'] = CONF.vpn_image_id
return self._verify_response('cloud-pipe-get-resp', subs, response)
@@ -1443,3 +1444,24 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
ctype = 'xml'
+
+
+class ConsolesSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".consoles.Consoles")
+
+ def test_get_vnc_console(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs["url"] = \
+ "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+ return self._verify_response('get-vnc-console-post-resp',
+ subs, response)
+
+
+class ConsoleOutputSampleXmlTests(ConsoleOutputSampleJsonTest):
+ ctype = 'xml'
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index 056ff32b7..c5897da12 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -15,19 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
+from nova import config
from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
def _get_flags(self):
f = super(ExtensionsTest, self)._get_flags()
- f['osapi_compute_extension'] = FLAGS.osapi_compute_extension[:]
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.tests.api.openstack.compute.extensions.'
'foxinsocks.Foxinsocks')
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index fbab7eb49..fbbf92eec 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -21,12 +21,11 @@ import signal
import time
import traceback
-from nova import flags
+from nova import config
from nova.openstack.common.log import logging
from nova import service
from nova.tests.integrated import integrated_helpers
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 0c9024a8a..d971af4f4 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -434,7 +434,3 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/nova/tests/matchers.py b/nova/tests/matchers.py
new file mode 100644
index 000000000..c3b88d2e5
--- /dev/null
+++ b/nova/tests/matchers.py
@@ -0,0 +1,196 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Matcher classes to be used inside of the testtools assertThat framework."""
+
+import pprint
+
+
+class DictKeysMismatch(object):
+ def __init__(self, d1only, d2only):
+ self.d1only = d1only
+ self.d2only = d2only
+
+ def describe(self):
+ return ('Keys in d1 and not d2: %(d1only)s.'
+ ' Keys in d2 and not d1: %(d2only)s' % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictMismatch(object):
+ def __init__(self, key, d1_value, d2_value):
+ self.key = key
+ self.d1_value = d1_value
+ self.d2_value = d2_value
+
+ def describe(self):
+ return ("Dictionaries do not match at %(key)s."
+ " d1: %(d1_value)s d2: %(d2_value)s" % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictMatches(object):
+
+ def __init__(self, d1, approx_equal=False, tolerance=0.001):
+ self.d1 = d1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictMatches(%s)' % (pprint.pformat(self.d1))
+
+ # Useful assertions
+ def match(self, d2):
+ """Assert two dicts are equivalent.
+
+ This is a 'deep' match in the sense that it handles nested
+ dictionaries appropriately.
+
+ NOTE:
+
+ If you don't care (or don't know) a given value, you can specify
+ the string DONTCARE as the value. This will cause that dict-item
+ to be skipped.
+
+ """
+
+ d1keys = set(self.d1.keys())
+ d2keys = set(d2.keys())
+ if d1keys != d2keys:
+ d1only = d1keys - d2keys
+ d2only = d2keys - d1keys
+ return DictKeysMismatch(d1only, d2only)
+
+ for key in d1keys:
+ d1value = self.d1[key]
+ d2value = d2[key]
+ try:
+ error = abs(float(d1value) - float(d2value))
+ within_tolerance = error <= self.tolerance
+ except (ValueError, TypeError):
+ # If both values aren't convertible to float, just ignore
+ # ValueError if arg is a str, TypeError if it's something else
+ # (like None)
+ within_tolerance = False
+
+ if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
+ matcher = DictMatches(d1value)
+ did_match = matcher.match(d2value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (d1value, d2value):
+ continue
+ elif self.approx_equal and within_tolerance:
+ continue
+ elif d1value != d2value:
+ return DictMismatch(key, d1value, d2value)
+
+
+class ListLengthMismatch(object):
+ def __init__(self, len1, len2):
+ self.len1 = len1
+ self.len2 = len2
+
+ def describe(self):
+ return ('Length mismatch: len(L1)=%(len1)d != '
+ 'len(L2)=%(len2)d' % self.__dict__)
+
+ def get_details(self):
+ return {}
+
+
+class DictListMatches(object):
+
+ def __init__(self, l1, approx_equal=False, tolerance=0.001):
+ self.l1 = l1
+ self.approx_equal = approx_equal
+ self.tolerance = tolerance
+
+ def __str__(self):
+ return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
+
+ # Useful assertions
+ def match(self, l2):
+ """Assert a list of dicts are equivalent."""
+
+ l1count = len(self.l1)
+ l2count = len(l2)
+ if l1count != l2count:
+ return ListLengthMismatch(l1count, l2count)
+
+ for d1, d2 in zip(self.l1, l2):
+ matcher = DictMatches(d2,
+ approx_equal=self.approx_equal,
+ tolerance=self.tolerance)
+ did_match = matcher.match(d1)
+ if did_match:
+ return did_match
+
+
+class SubDictMismatch(object):
+ def __init__(self,
+ key=None,
+ sub_value=None,
+ super_value=None,
+ keys=False):
+ self.key = key
+ self.sub_value = sub_value
+ self.super_value = super_value
+ self.keys = keys
+
+ def describe(self):
+ if self.keys:
+ return "Keys between dictionaries did not match"
+ else:
+ return("Dictionaries do not match at %s. d1: %s d2: %s"
+ % (self.key,
+ self.super_value,
+ self.sub_value))
+
+ def get_details(self):
+ return {}
+
+
+class IsSubDictOf(object):
+
+ def __init__(self, super_dict):
+ self.super_dict = super_dict
+
+ def __str__(self):
+ return 'IsSubDictOf(%s)' % (self.super_dict)
+
+ def match(self, sub_dict):
+ """Assert a sub_dict is subset of super_dict."""
+ if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
+ return SubDictMismatch(keys=True)
+ for k, sub_value in sub_dict.items():
+ super_value = self.super_dict[k]
+ if isinstance(sub_value, dict):
+ matcher = IsSubDictOf(super_value)
+ did_match = matcher.match(sub_value)
+ if did_match is not None:
+ return did_match
+ elif 'DONTCARE' in (sub_value, super_value):
+ continue
+ else:
+ if sub_value != super_value:
+ return SubDictMismatch(k, sub_value, super_value)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 04f646ef0..a033c6506 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -17,13 +17,19 @@
"""Tests for network API"""
+import random
+
from nova import context
+from nova import exception
from nova import network
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import rpc
from nova import test
+FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
+
+
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
@@ -111,7 +117,6 @@ class ApiTestCase(test.TestCase):
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
- expected['host'] = 'fake_compute_dest'
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
@@ -119,6 +124,7 @@ class ApiTestCase(test.TestCase):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
+ expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
@@ -133,6 +139,7 @@ class ApiTestCase(test.TestCase):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
+ expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
@@ -142,3 +149,64 @@ class ApiTestCase(test.TestCase):
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
+
+ def test_is_multi_host_instance_has_no_fixed_ip(self):
+ def fake_fixed_ip_get_by_instance(ctxt, uuid):
+ raise exception.FixedIpNotFoundForInstance
+ self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
+ fake_fixed_ip_get_by_instance)
+ instance = {'uuid': FAKE_UUID}
+ self.assertFalse(self.network_api._is_multi_host(self.context,
+ instance))
+
+ def test_is_multi_host_network_has_no_project_id(self):
+ is_multi_host = random.choice([True, False])
+ network = {'project_id': None,
+ 'multi_host': is_multi_host, }
+ network_ref = self.network_api.db.network_create_safe(
+ self.context.elevated(),
+ network)
+
+ def fake_fixed_ip_get_by_instance(ctxt, uuid):
+ fixed_ip = [{'network_id': network_ref['id'],
+ 'instance_uuid': FAKE_UUID, }]
+ return fixed_ip
+
+ self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
+ fake_fixed_ip_get_by_instance)
+
+ instance = {'uuid': FAKE_UUID}
+ result = self.network_api._is_multi_host(self.context, instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_is_multi_host_network_has_project_id(self):
+ is_multi_host = random.choice([True, False])
+ network = {'project_id': self.context.project_id,
+ 'multi_host': is_multi_host, }
+ network_ref = self.network_api.db.network_create_safe(
+ self.context.elevated(),
+ network)
+
+ def fake_fixed_ip_get_by_instance(ctxt, uuid):
+ fixed_ip = [{'network_id': network_ref['id'],
+ 'instance_uuid': FAKE_UUID, }]
+ return fixed_ip
+
+ self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
+ fake_fixed_ip_get_by_instance)
+
+ instance = {'uuid': FAKE_UUID}
+ result = self.network_api._is_multi_host(self.context, instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_get_backdoor_port(self):
+ backdoor_port = 59697
+
+ def fake_get_backdoor_port(ctxt):
+ return backdoor_port
+
+ self.stubs.Set(self.network_api.network_rpcapi, 'get_backdoor_port',
+ fake_get_backdoor_port)
+
+ port = self.network_api.get_backdoor_port(self.context)
+ self.assertEqual(port, backdoor_port)
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index f69023ef8..666ce6dab 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -19,9 +19,9 @@ import os
import mox
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.network import linux_net
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
@@ -29,12 +29,9 @@ from nova.openstack.common import log as logging
from nova import test
from nova import utils
-
-FLAGS = flags.FLAGS
-
+CONF = config.CONF
LOG = logging.getLogger(__name__)
-
HOST = "testhost"
instances = {'00000000-0000-0000-0000-0000000000000000':
@@ -216,7 +213,7 @@ class LinuxNetworkTestCase(test.TestCase):
def setUp(self):
super(LinuxNetworkTestCase, self).setUp()
- network_driver = FLAGS.network_driver
+ network_driver = CONF.network_driver
self.driver = importutils.import_module(network_driver)
self.driver.db = db
self.context = context.RequestContext('testuser', 'testproject',
@@ -309,7 +306,7 @@ class LinuxNetworkTestCase(test.TestCase):
self.assertEquals(actual_hosts, expected)
def test_get_dhcp_opts_for_nw00(self):
- expected_opts = 'NW-0,3\nNW-3,3\nNW-4,3'
+ expected_opts = 'NW-3,3\nNW-4,3'
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
self.assertEquals(actual_opts, expected_opts)
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 77fccd904..9c4b29049 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -32,6 +32,7 @@ import nova.policy
from nova import test
from nova.tests import fake_ldap
from nova.tests import fake_network
+from nova.tests import matchers
from nova import utils
@@ -168,7 +169,7 @@ class FlatNetworkTestCase(test.TestCase):
'bridge_interface': None,
'vlan': None}
- self.assertDictMatch(nw, check)
+ self.assertThat(nw, matchers.DictMatches(check))
check = {'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
@@ -184,13 +185,13 @@ class FlatNetworkTestCase(test.TestCase):
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
'should_create_bridge': False}
- self.assertDictMatch(info, check)
+ self.assertThat(info, matchers.DictMatches(check))
check = [{'enabled': 'DONTCARE',
'ip': '2001:db8:0:1::%x' % nid,
'netmask': 64,
'gateway': 'fe80::def'}]
- self.assertDictListMatch(info['ip6s'], check)
+ self.assertThat(info['ip6s'], matchers.DictListMatches(check))
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
@@ -198,7 +199,7 @@ class FlatNetworkTestCase(test.TestCase):
'netmask': '255.255.255.0',
'gateway': '192.168.%d.1' % nid}
for ip_num in xrange(1, num_fixed_ips + 1)]
- self.assertDictListMatch(info['ips'], check)
+ self.assertThat(info['ips'], matchers.DictListMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
@@ -1523,6 +1524,19 @@ class RPCAllocateTestCase(test.TestCase):
self.assertEqual(rval, address)
+class BackdoorPortTestCase(test.TestCase):
+ """Tests nova.network.manager.get_backdoor_port"""
+ def setUp(self):
+ super(BackdoorPortTestCase, self).setUp()
+ self.manager = network_manager.NetworkManager()
+ self.manager.backdoor_port = 59697
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_backdoor_port(self):
+ port = self.manager.get_backdoor_port(self.context)
+ self.assertEqual(port, self.manager.backdoor_port)
+
+
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP"""
@@ -1665,11 +1679,14 @@ class FloatingIPTestCase(test.TestCase):
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.mox.ReplayAll()
- floating_ip_addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_start(self.context, FAKEUUID,
- 3, self.project_id,
- 'fake_source', 'fake_dest',
- floating_ip_addresses)
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_start(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source',
+ dest='fake_dest')
self.assertEqual(called['count'], 2)
@@ -1703,11 +1720,14 @@ class FloatingIPTestCase(test.TestCase):
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
- floating_ip_addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_finish(self.context, FAKEUUID,
- 3, self.project_id,
- 'fake_source', 'fake_dest',
- floating_ip_addresses)
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_finish(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ host='fake_dest',
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source')
self.assertEqual(called['count'], 2)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index a8f29e012..7c19698fb 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -15,20 +15,22 @@
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+import uuid
+
import mox
+from nova import config
from nova import context
from nova import exception
from nova.network import model
from nova.network import quantumv2
from nova.network.quantumv2 import api as quantumapi
-from nova.openstack.common import cfg
from nova import test
-from nova import utils
from quantumclient.v2_0 import client
-FLAGS = cfg.CONF
+CONF = config.CONF
+
#NOTE: Quantum client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make quantum client throw a custom
@@ -92,9 +94,9 @@ class TestQuantumClient(test.TestCase):
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
- endpoint_url=FLAGS.quantum_url,
+ endpoint_url=CONF.quantum_url,
token=my_context.auth_token,
- timeout=FLAGS.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
quantumv2.get_client(my_context)
@@ -107,23 +109,17 @@ class TestQuantumClient(test.TestCase):
my_context)
def test_withouttoken_keystone_not_auth(self):
- # self.flags(quantum_auth_strategy=None) fail to work
- old_quantum_auth_strategy = FLAGS.quantum_auth_strategy
- setattr(FLAGS, 'quantum_auth_strategy', None)
+ self.flags(quantum_auth_strategy=None)
self.flags(quantum_url='http://anyhost/')
self.flags(quantum_url_timeout=30)
my_context = context.RequestContext('userid', 'my_tenantid')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
- endpoint_url=FLAGS.quantum_url,
+ endpoint_url=CONF.quantum_url,
auth_strategy=None,
- timeout=FLAGS.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
- try:
- quantumv2.get_client(my_context)
- finally:
- setattr(FLAGS, 'quantum_auth_strategy',
- old_quantum_auth_strategy)
+ quantumv2.get_client(my_context)
class TestQuantumv2(test.TestCase):
@@ -139,7 +135,7 @@ class TestQuantumv2(test.TestCase):
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
- 'uuid': str(utils.gen_uuid()),
+ 'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
@@ -225,7 +221,7 @@ class TestQuantumv2(test.TestCase):
self.mox.UnsetStubs()
self.mox.VerifyAll()
finally:
- FLAGS.reset()
+ CONF.reset()
super(TestQuantumv2, self).tearDown()
def _verify_nw_info(self, nw_inf, index=0):
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index de3f19cea..500cc9583 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -18,14 +18,13 @@
Unit Tests for nova.network.rpcapi
"""
+from nova import config
from nova import context
-from nova import flags
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import rpc
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class NetworkRpcAPITestCase(test.TestCase):
@@ -34,7 +33,7 @@ class NetworkRpcAPITestCase(test.TestCase):
rpcapi = network_rpcapi.NetworkAPI()
expected_retval = 'foo' if method == 'call' else None
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
- expected_topic = FLAGS.network_topic
+ expected_topic = CONF.network_topic
expected_msg = rpcapi.make_msg(method, **kwargs)
if 'source_compute' in expected_msg['args']:
# Fix up for migrate_instance_* calls.
@@ -52,7 +51,7 @@ class NetworkRpcAPITestCase(test.TestCase):
if method != 'deallocate_fixed_ip':
del expected_msg['args']['host']
host = kwargs['host']
- expected_topic = rpc.queue_get_for(ctxt, FLAGS.network_topic, host)
+ expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic, host)
expected_msg['version'] = expected_version
self.fake_args = None
@@ -121,6 +120,9 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('get_instance_id_by_floating_address',
rpc_method='call', address='w.x.y.z')
+ def test_get_backdoor_port(self):
+ self._test_network_api('get_backdoor_port', rpc_method='call')
+
def test_get_vifs_by_instance(self):
self._test_network_api('get_vifs_by_instance',
rpc_method='call', instance_id='fake_id')
diff --git a/nova/tests/policy.json b/nova/tests/policy.json
index efe2724ad..b08e89baa 100644
--- a/nova/tests/policy.json
+++ b/nova/tests/policy.json
@@ -94,6 +94,7 @@
"compute_extension:disk_config": "",
"compute_extension:extended_server_attributes": "",
"compute_extension:extended_status": "",
+ "compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
@@ -104,6 +105,8 @@
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
+ "compute_extension:fping": "",
+ "compute_extension:fping:all_tenants": "is_admin:True",
"compute_extension:hosts": "",
"compute_extension:hypervisors": "",
"compute_extension:instance_usage_audit_log": "",
@@ -117,6 +120,7 @@
"compute_extension:rescue": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "",
+ "compute_extension:services": "",
"compute_extension:simple_tenant_usage:show": "",
"compute_extension:simple_tenant_usage:list": "",
"compute_extension:users": "",
diff --git a/nova/tests/runtime_flags.py b/nova/tests/runtime_flags.py
index 7cc8c3219..33d265cf4 100644
--- a/nova/tests/runtime_flags.py
+++ b/nova/tests/runtime_flags.py
@@ -16,8 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag'))
+CONF = config.CONF
+CONF.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag'))
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index ba255c32c..3c7b462d0 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -29,38 +29,42 @@ COMPUTE_NODES = [
dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=512, free_ram_mb=512, vcpus_used=1,
free_disk_mb=512, local_gb_used=0, updated_at=None,
- service=dict(host='host1', disabled=False)),
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1'),
dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_mb=1024, local_gb_used=0, updated_at=None,
- service=dict(host='host2', disabled=True)),
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2'),
dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3072, free_ram_mb=3072, vcpus_used=1,
free_disk_mb=3072, local_gb_used=0, updated_at=None,
- service=dict(host='host3', disabled=False)),
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3'),
dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_mb=8192, local_gb_used=0, updated_at=None,
- service=dict(host='host4', disabled=False)),
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4'),
# Broken entry
dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
]
INSTANCES = [
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host1'),
+ host='host1', node='node1'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=512, ephemeral_gb=0, memory_mb=512, vcpus=1,
- host='host2'),
+ host='host2', node='node2'),
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host3'),
+ host='host3', node='node3'),
# Broken host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
host=None),
# No matching host
dict(root_gb=1024, ephemeral_gb=0, memory_mb=1024, vcpus=1,
- host='host5'),
+ host='host5', node='node5'),
]
@@ -96,8 +100,8 @@ class FakeHostManager(host_manager.HostManager):
class FakeHostState(host_manager.HostState):
- def __init__(self, host, topic, attribute_dict):
- super(FakeHostState, self).__init__(host, topic)
+ def __init__(self, host, node, attribute_dict):
+ super(FakeHostState, self).__init__(host, node)
for (key, val) in attribute_dict.iteritems():
setattr(self, key, val)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index be6bc3317..e9412ba60 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -27,12 +27,12 @@ from nova import exception
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
-from nova.scheduler import least_cost
+from nova.scheduler import weights
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
-def fake_filter_hosts(hosts, filter_properties):
+def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
@@ -98,17 +98,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fake_context, request_spec, None, None, None, None, {})
self.assertTrue(self.was_admin)
- def test_schedule_bad_topic(self):
- """Parameter checking."""
- sched = fakes.FakeFilterScheduler()
- fake_context = context.RequestContext('user', 'project')
- self.assertRaises(NotImplementedError, sched._schedule, fake_context,
- "foo", {}, {})
-
def test_scheduler_includes_launch_index(self):
fake_context = context.RequestContext('user', 'project')
- fake_kwargs = {'fake_kwarg1': 'fake_value1',
- 'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
@@ -129,9 +120,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
- self.driver._schedule(fake_context, 'compute',
- request_spec, {}, ['fake-uuid1', 'fake-uuid2']
- ).AndReturn(['host1', 'host2'])
+ self.driver._schedule(fake_context, request_spec, {},
+ ['fake-uuid1', 'fake-uuid2']).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
fake_context, 'host1',
@@ -155,19 +145,19 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.next_weight = 1.0
- def _fake_weighted_sum(functions, hosts, options):
+ def _fake_weigh_objects(_self, functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
- return least_cost.WeightedHost(self.next_weight,
- host_state=host_state)
+ return [weights.WeighedHost(host_state, self.next_weight)]
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
- self.stubs.Set(sched.host_manager, 'filter_hosts',
- fake_filter_hosts)
- self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
@@ -181,11 +171,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'vcpus': 1,
'os_type': 'Linux'}}
self.mox.ReplayAll()
- weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec, {})
- self.assertEquals(len(weighted_hosts), 10)
- for weighted_host in weighted_hosts:
- self.assertTrue(weighted_host.host_state is not None)
+ weighed_hosts = sched._schedule(fake_context, request_spec, {})
+ self.assertEquals(len(weighed_hosts), 10)
+ for weighed_host in weighed_hosts:
+ self.assertTrue(weighed_host.obj is not None)
def test_schedule_prep_resize_doesnt_update_host(self):
fake_context = context.RequestContext('user', 'project',
@@ -194,8 +183,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
- host_state = host_manager.HostState('host2', 'compute')
- return [least_cost.WeightedHost(1.0, host_state=host_state)]
+ host_state = host_manager.HostState('host2', 'node2')
+ return [weights.WeighedHost(host_state, 1.0)]
self.stubs.Set(sched, '_schedule', _return_hosts)
@@ -214,19 +203,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
instance, {}, None)
self.assertEqual(info['called'], 0)
- def test_get_cost_functions(self):
- fixture = fakes.FakeFilterScheduler()
- fns = fixture.get_cost_functions()
- self.assertEquals(len(fns), 1)
- weight, fn = fns[0]
- self.assertEquals(weight, -1.0)
- hostinfo = host_manager.HostState('host', 'compute')
- hostinfo.update_from_compute_node(dict(memory_mb=1000,
- local_gb=0, vcpus=1, disk_available_least=1000,
- free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
- local_gb_used=0, updated_at=None))
- self.assertEquals(872, fn(hostinfo, {}))
-
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
@@ -248,7 +224,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
@@ -263,7 +239,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
@@ -280,7 +256,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
- sched._schedule(self.context, 'compute', request_spec,
+ sched._schedule(self.context, request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
@@ -298,7 +274,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
- 'compute', request_spec, filter_properties=filter_properties)
+ request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
@@ -318,7 +294,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
- host_state = host_manager.HostState('host', 'compute')
+ host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
sched._post_select_populate_filter_properties(filter_properties,
host_state)
@@ -342,15 +318,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
filter_properties = {'retry': retry}
reservations = None
- host = fakes.FakeHostState('host', 'compute', {})
- weighted_host = least_cost.WeightedHost(1, host)
- hosts = [weighted_host]
+ host = fakes.FakeHostState('host', 'node', {})
+ weighed_host = weights.WeighedHost(host, 1)
+ weighed_hosts = [weighed_host]
self.mox.StubOutWithMock(sched, '_schedule')
self.mox.StubOutWithMock(sched.compute_rpcapi, 'prep_resize')
- sched._schedule(self.context, 'compute', request_spec,
- filter_properties, [instance['uuid']]).AndReturn(hosts)
+ sched._schedule(self.context, request_spec, filter_properties,
+ [instance['uuid']]).AndReturn(weighed_hosts)
sched.compute_rpcapi.prep_resize(self.context, image, instance,
instance_type, 'host', reservations, request_spec=request_spec,
filter_properties=filter_properties)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index e789f169e..84667eb9e 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -18,10 +18,10 @@ Tests For Scheduler Host Filters.
import httplib
import stubout
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
@@ -30,7 +30,7 @@ from nova import test
from nova.tests.scheduler import fakes
from nova import utils
-
+CONF = config.CONF
DATA = ''
@@ -262,36 +262,37 @@ class HostFiltersTestCase(test.TestCase):
self.json_query = jsonutils.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
- # This has a side effect of testing 'get_filter_classes'
- # when specifying a method (in this case, our standard filters)
- classes = filters.get_filter_classes(
- ['nova.scheduler.filters.standard_filters'])
+ filter_handler = filters.HostFilterHandler()
+ classes = filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.all_filters'])
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
- def test_get_filter_classes(self):
- classes = filters.get_filter_classes(
- ['nova.tests.scheduler.test_host_filters.TestFilter'])
- self.assertEqual(len(classes), 1)
- self.assertEqual(classes[0].__name__, 'TestFilter')
- # Test a specific class along with our standard filters
- classes = filters.get_filter_classes(
- ['nova.tests.scheduler.test_host_filters.TestFilter',
- 'nova.scheduler.filters.standard_filters'])
- self.assertEqual(len(classes), 1 + len(self.class_map))
-
- def test_get_filter_classes_raises_on_invalid_classes(self):
- self.assertRaises(ImportError,
- filters.get_filter_classes,
- ['nova.tests.scheduler.test_host_filters.NoExist'])
- self.assertRaises(exception.ClassNotFound,
- filters.get_filter_classes,
- ['nova.tests.scheduler.test_host_filters.TestBogusFilter'])
+ def test_standard_filters_is_deprecated(self):
+ info = {'called': False}
+
+ def _fake_deprecated(*args, **kwargs):
+ info['called'] = True
+
+ self.stubs.Set(filters.LOG, 'deprecated', _fake_deprecated)
+
+ filter_handler = filters.HostFilterHandler()
+ filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.standard_filters'])
+
+ self.assertTrue(info['called'])
+ self.assertIn('AllHostsFilter', self.class_map)
+ self.assertIn('ComputeFilter', self.class_map)
+
+ def test_all_filters(self):
+ # Double check at least a couple of known filters exist
+ self.assertIn('AllHostsFilter', self.class_map)
+ self.assertIn('ComputeFilter', self.class_map)
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
@@ -301,7 +302,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -314,7 +315,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_no_list_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -327,7 +328,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -340,7 +341,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_different_filter_handles_none(self):
filt_cls = self.class_map['DifferentHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -352,7 +353,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -365,7 +366,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
@@ -378,7 +379,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -391,7 +392,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameHostFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
@@ -403,7 +404,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -417,7 +418,7 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
host.capabilities = {'host_ip': '10.8.1.1'}
affinity_ip = "10.8.1.100"
@@ -431,9 +432,9 @@ class HostFiltersTestCase(test.TestCase):
def test_affinity_simple_cidr_filter_handles_none(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
- affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
+ affinity_ip = CONF.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
@@ -448,7 +449,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -464,7 +465,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since empty
@@ -490,7 +491,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_type': {'name': 'fake2'}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('fake_host', 'compute',
+ host = fakes.FakeHostState('fake_host', 'fake_node',
{'capabilities': capabilities,
'service': service})
#True since no aggregates
@@ -509,7 +510,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -521,7 +522,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'total_usable_ram_mb': 1024,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -533,7 +534,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': -1024, 'total_usable_ram_mb': 2048,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -547,7 +548,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -560,7 +561,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 1}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -574,7 +575,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -589,7 +590,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True}
service = {'disabled': False}
# 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
'capabilities': capabilities, 'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -600,7 +601,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -611,7 +612,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -622,33 +623,11 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
- def test_compute_filter_passes_on_volume(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeFilter']()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- capabilities = {'enabled': False}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'volume',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_compute_filter_passes_on_no_instance_type(self):
- self._stub_service_is_up(True)
- filt_cls = self.class_map['ComputeFilter']()
- filter_properties = {}
- capabilities = {'enabled': False}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
- {'free_ram_mb': 1024, 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
def test_image_properties_filter_passes_same_inst_props(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ImagePropertiesFilter']()
@@ -659,7 +638,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -673,7 +652,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -686,7 +665,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -699,7 +678,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'xen', 'xen')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -710,7 +689,7 @@ class HostFiltersTestCase(test.TestCase):
capabilities = {'enabled': True,
'supported_instances': [
('x86_64', 'kvm', 'hvm')]}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -722,7 +701,7 @@ class HostFiltersTestCase(test.TestCase):
'vm_mode': 'hvm'}}
filter_properties = {'request_spec': {'image': img_props}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -734,7 +713,7 @@ class HostFiltersTestCase(test.TestCase):
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
assertion = self.assertTrue if passes else self.assertFalse
@@ -759,7 +738,7 @@ class HostFiltersTestCase(test.TestCase):
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': capabilities})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -779,7 +758,8 @@ class HostFiltersTestCase(test.TestCase):
self._create_aggregate_with_host(name='fake2', metadata=emeta)
filter_properties = {'context': self.context,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
@@ -793,7 +773,8 @@ class HostFiltersTestCase(test.TestCase):
metadata={'opt2': '2'})
filter_properties = {'context': self.context, 'instance_type':
{'memory_mb': 1024, 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {'free_ram_mb': 1024})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
db.aggregate_host_delete(self.context.elevated(), agg2.id, 'host1')
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -819,7 +800,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_fails_non_isolated_on_isolated(self):
@@ -830,7 +811,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_isolated_on_isolated(self):
@@ -841,7 +822,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'isolated'}
}
}
- host = fakes.FakeHostState('isolated', 'compute', {})
+ host = fakes.FakeHostState('isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
@@ -852,7 +833,7 @@ class HostFiltersTestCase(test.TestCase):
'instance_properties': {'image_ref': 'non-isolated'}
}
}
- host = fakes.FakeHostState('non-isolated', 'compute', {})
+ host = fakes.FakeHostState('non-isolated', 'node', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_json_filter_passes(self):
@@ -862,7 +843,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -874,7 +855,7 @@ class HostFiltersTestCase(test.TestCase):
'root_gb': 200,
'ephemeral_gb': 0}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 0,
'free_disk_mb': 0,
'capabilities': capabilities})
@@ -887,7 +868,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1023,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -900,7 +881,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': self.json_query}}
capabilities = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': (200 * 1024) - 1,
'capabilities': capabilities})
@@ -917,7 +898,7 @@ class HostFiltersTestCase(test.TestCase):
'ephemeral_gb': 0},
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -934,7 +915,7 @@ class HostFiltersTestCase(test.TestCase):
'scheduler_hints': {'query': json_query}}
capabilities = {'enabled': True}
service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024,
'free_disk_mb': 200 * 1024,
'capabilities': capabilities})
@@ -962,7 +943,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 10,
'free_disk_mb': 200,
'capabilities': capabilities,
@@ -972,7 +953,7 @@ class HostFiltersTestCase(test.TestCase):
# Passes
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -982,7 +963,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities being disabled
capabilities = {'enabled': False, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'instance_type',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 40,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -992,7 +973,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to being exact memory/disk we don't want
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 30,
'free_disk_mb': 300,
'capabilities': capabilities,
@@ -1002,7 +983,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to memory lower but disk higher
capabilities = {'enabled': True, 'opt1': 'match'}
service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1012,7 +993,7 @@ class HostFiltersTestCase(test.TestCase):
# Fails due to capabilities 'opt1' not equal
capabilities = {'enabled': True, 'opt1': 'no-match'}
service = {'enabled': True}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 20,
'free_disk_mb': 400,
'capabilities': capabilities,
@@ -1021,7 +1002,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_basic_operators(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
# (operator, arguments, expected_result)
ops_to_test = [
@@ -1090,14 +1071,14 @@ class HostFiltersTestCase(test.TestCase):
'query': jsonutils.dumps(raw),
},
}
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
self.assertRaises(KeyError,
filt_cls.host_passes, host, filter_properties)
def test_json_filter_empty_filters_pass(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = []
@@ -1117,7 +1098,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_invalid_num_arguments_fails(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
@@ -1138,7 +1119,7 @@ class HostFiltersTestCase(test.TestCase):
def test_json_filter_unknown_variable_ignored(self):
filt_cls = self.class_map['JsonFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'capabilities': {'enabled': True}})
raw = ['=', '$........', 1, 1]
@@ -1161,7 +1142,7 @@ class HostFiltersTestCase(test.TestCase):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
@@ -1172,7 +1153,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
@@ -1183,7 +1164,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'trusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
@@ -1194,7 +1175,7 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
@@ -1205,28 +1186,28 @@ class HostFiltersTestCase(test.TestCase):
extra_specs = {'trust:trusted_host': 'untrusted'}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 7})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails_safe(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_core_filter_fails(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'vcpus_total': 4, 'vcpus_used': 8})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1246,27 +1227,29 @@ class HostFiltersTestCase(test.TestCase):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertTrue(filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self):
filt_cls = self.class_map['AvailabilityZoneFilter']()
service = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', 'compute', {'service': service})
+ host = fakes.FakeHostState('host1', 'node1',
+ {'service': service})
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
"""Test case where retry/re-scheduling is disabled"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
"""Host not previously tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host2', 'host3'])
filter_properties = dict(retry=retry)
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1274,7 +1257,7 @@ class HostFiltersTestCase(test.TestCase):
def test_retry_filter_fail(self):
"""Host was already tried"""
filt_cls = self.class_map['RetryFilter']()
- host = fakes.FakeHostState('host1', 'compute', {})
+ host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1, hosts=['host3', 'host1'])
filter_properties = dict(retry=retry)
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -1282,7 +1265,7 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_passes(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 7})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1290,13 +1273,13 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_iops_fails(self):
self.flags(max_io_ops_per_host=8)
filt_cls = self.class_map['IoOpsFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_io_ops': 8})
def test_filter_num_instances_passes(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 4})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
@@ -1304,7 +1287,7 @@ class HostFiltersTestCase(test.TestCase):
def test_filter_num_instances_fails(self):
self.flags(max_instances_per_host=5)
filt_cls = self.class_map['NumInstancesFilter']()
- host = fakes.FakeHostState('host1', 'compute',
+ host = fakes.FakeHostState('host1', 'node1',
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 74c24d56b..d12f1dea5 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -24,6 +24,7 @@ from nova import exception
from nova.openstack.common import timeutils
from nova.scheduler import host_manager
from nova import test
+from nova.tests import matchers
from nova.tests.scheduler import fakes
@@ -44,6 +45,10 @@ class HostManagerTestCase(test.TestCase):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
+ def tearDown(self):
+ timeutils.clear_time_override()
+ super(HostManagerTestCase, self).tearDown()
+
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='ComputeFilterClass3')
self.host_manager.filter_classes = [ComputeFilterClass1,
@@ -56,77 +61,198 @@ class HostManagerTestCase(test.TestCase):
self.host_manager.filter_classes = [ComputeFilterClass1,
ComputeFilterClass2]
- # Test 'compute' returns 1 correct function
- filter_fns = self.host_manager._choose_host_filters(None)
- self.assertEqual(len(filter_fns), 1)
- self.assertEqual(filter_fns[0].__func__,
- ComputeFilterClass2.host_passes.__func__)
+ # Test we returns 1 correct function
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_classes), 1)
+ self.assertEqual(filter_classes[0].__name__, 'ComputeFilterClass2')
+
+ def test_get_filtered_hosts(self):
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1']
+ fake_classes = 'fake_classes'
+ fake_properties = {'moo': 1, 'cow': 2}
+ expected_hosts = set(fake_hosts)
+ fake_result = 'fake_result'
+
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
+
+ self.host_manager._choose_host_filters(None).AndReturn(fake_classes)
+ self.host_manager.filter_handler.get_filtered_objects(fake_classes,
+ expected_hosts, fake_properties).AndReturn(fake_result)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager. get_filtered_hosts(fake_hosts,
+ fake_properties)
+ self.assertEqual(result, fake_result)
+
+ def test_get_filtered_hosts_with_specificed_filters(self):
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1']
+ fake_classes = 'fake_classes'
+ fake_properties = {'moo': 1, 'cow': 2}
+ fake_filters = 'fake_filters'
+ expected_hosts = set(fake_hosts)
+ fake_result = 'fake_result'
+
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
+
+ self.host_manager._choose_host_filters(fake_filters).AndReturn(
+ fake_classes)
+ self.host_manager.filter_handler.get_filtered_objects(fake_classes,
+ expected_hosts, fake_properties).AndReturn(fake_result)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(fake_hosts,
+ fake_properties, filter_class_names=fake_filters)
+ self.assertEqual(result, fake_result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1', 'fake_host3', 'fake_host4']
+ fake_classes = 'fake_classes'
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+ expected_hosts = set(['fake_host2', 'fake_host4'])
+ fake_result = 'fake_result'
+
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
- def test_filter_hosts(self):
- topic = 'fake_topic'
+ self.host_manager._choose_host_filters(None).AndReturn(fake_classes)
+ self.host_manager.filter_handler.get_filtered_objects(fake_classes,
+ expected_hosts, fake_properties).AndReturn(fake_result)
- filters = ['fake-filter1', 'fake-filter2']
- fake_host1 = host_manager.HostState('host1', topic)
- fake_host2 = host_manager.HostState('host2', topic)
- hosts = [fake_host1, fake_host2]
- filter_properties = {'fake_prop': 'fake_val'}
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(fake_hosts,
+ fake_properties)
+ self.assertEqual(result, fake_result)
- self.mox.StubOutWithMock(self.host_manager,
- '_choose_host_filters')
- self.mox.StubOutWithMock(fake_host1, 'passes_filters')
- self.mox.StubOutWithMock(fake_host2, 'passes_filters')
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1', 'fake_host3', 'fake_host4']
+ fake_classes = 'fake_classes'
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+ expected_hosts = set(['fake_host1', 'fake_host3'])
+ fake_result = 'fake_result'
- self.host_manager._choose_host_filters(None).AndReturn(filters)
- fake_host1.passes_filters(filters, filter_properties).AndReturn(
- False)
- fake_host2.passes_filters(filters, filter_properties).AndReturn(
- True)
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
+
+ self.host_manager._choose_host_filters(None).AndReturn(fake_classes)
+ self.host_manager.filter_handler.get_filtered_objects(fake_classes,
+ expected_hosts, fake_properties).AndReturn(fake_result)
self.mox.ReplayAll()
- filtered_hosts = self.host_manager.filter_hosts(hosts,
- filter_properties, filters=None)
- self.assertEqual(len(filtered_hosts), 1)
- self.assertEqual(filtered_hosts[0], fake_host2)
+
+ result = self.host_manager.get_filtered_hosts(fake_hosts,
+ fake_properties)
+ self.assertEqual(result, fake_result)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1', 'fake_host3', 'fake_host4']
+ fake_classes = 'fake_classes'
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+ expected_result = []
+
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ # Make sure this is not called.
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
+
+ self.host_manager._choose_host_filters(None).AndReturn(fake_classes)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(fake_hosts,
+ fake_properties)
+ self.assertEqual(result, expected_result)
+
+ def test_get_filtered_hosts_with_ignore_and_force(self):
+ """Ensure ignore_hosts processed before force_hosts in host filters"""
+ fake_hosts = ['fake_host1', 'fake_host2', 'fake_host1',
+ 'fake_host1', 'fake_host3', 'fake_host4']
+ fake_classes = 'fake_classes'
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+ expected_hosts = set(['fake_host3'])
+ fake_result = 'fake_result'
+
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+ # Make sure this is not called.
+ self.mox.StubOutWithMock(self.host_manager.filter_handler,
+ 'get_filtered_objects')
+ self.host_manager.filter_handler.get_filtered_objects(fake_classes,
+ expected_hosts, fake_properties).AndReturn(fake_result)
+
+ self.host_manager._choose_host_filters(None).AndReturn(fake_classes)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(fake_hosts,
+ fake_properties)
+ self.assertEqual(result, fake_result)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
- self.assertDictMatch(service_states, {})
+ self.assertEqual(len(service_states.keys()), 0)
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
- timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_compute_capabs = dict(free_memory=1234, host_memory=5678,
- timestamp=1)
- host1_volume_capabs = dict(free_disk=4321, timestamp=1)
- host2_compute_capabs = dict(free_memory=8756, timestamp=1)
+ timestamp=1, hypervisor_hostname='node1')
+ host2_compute_capabs = dict(free_memory=8756, timestamp=1,
+ hypervisor_hostname='node2')
self.mox.ReplayAll()
self.host_manager.update_service_capabilities('compute', 'host1',
host1_compute_capabs)
- self.host_manager.update_service_capabilities('volume', 'host1',
- host1_volume_capabs)
self.host_manager.update_service_capabilities('compute', 'host2',
host2_compute_capabs)
- # Make sure dictionary isn't re-assigned
- self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_compute_capabs['timestamp'], 1)
host1_compute_capabs['timestamp'] = 31337
- host1_volume_capabs['timestamp'] = 31338
host2_compute_capabs['timestamp'] = 31339
- expected = {'host1': {'compute': host1_compute_capabs,
- 'volume': host1_volume_capabs},
- 'host2': {'compute': host2_compute_capabs}}
- self.assertDictMatch(service_states, expected)
+ expected = {('host1', 'node1'): host1_compute_capabs,
+ ('host2', 'node2'): host2_compute_capabs}
+ self.assertThat(service_states, matchers.DictMatches(expected))
+
+ def test_update_service_capabilities_node_key(self):
+ service_states = self.host_manager.service_states
+ self.assertThat(service_states, matchers.DictMatches({}))
+
+ host1_cap = {'hypervisor_hostname': 'host1-hvhn'}
+ host2_cap = {}
+
+ timeutils.set_time_override(31337)
+ self.host_manager.update_service_capabilities('compute', 'host1',
+ host1_cap)
+ timeutils.set_time_override(31338)
+ self.host_manager.update_service_capabilities('compute', 'host2',
+ host2_cap)
+ host1_cap['timestamp'] = 31337
+ host2_cap['timestamp'] = 31338
+ expected = {('host1', 'host1-hvhn'): host1_cap,
+ ('host2', None): host2_cap}
+ self.assertThat(service_states, matchers.DictMatches(expected))
def test_get_all_host_states(self):
context = 'fake_context'
- topic = 'compute'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
@@ -136,27 +262,38 @@ class HostManagerTestCase(test.TestCase):
host_manager.LOG.warn("No service for compute ID 5")
self.mox.ReplayAll()
- host_states = self.host_manager.get_all_host_states(context, topic)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
- self.assertEqual(len(host_states), 4)
+ self.assertEqual(len(host_states_map), 4)
# Check that .service is set properly
for i in xrange(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
- self.assertEqual(host_states[host].service,
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
compute_node['service'])
- self.assertEqual(host_states['host1'].free_ram_mb, 512)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
# 511GB
- self.assertEqual(host_states['host1'].free_disk_mb, 524288)
- self.assertEqual(host_states['host2'].free_ram_mb, 1024)
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
# 1023GB
- self.assertEqual(host_states['host2'].free_disk_mb, 1048576)
- self.assertEqual(host_states['host3'].free_ram_mb, 3072)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
# 3071GB
- self.assertEqual(host_states['host3'].free_disk_mb, 3145728)
- self.assertEqual(host_states['host4'].free_ram_mb, 8192)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
# 8191GB
- self.assertEqual(host_states['host4'].free_disk_mb, 8388608)
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
class HostStateTestCase(test.TestCase):
@@ -165,91 +302,6 @@ class HostStateTestCase(test.TestCase):
# update_from_compute_node() and consume_from_instance() are tested
# in HostManagerTestCase.test_get_all_host_states()
- def test_host_state_passes_filters_passes(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(True)
- cls2.host_passes(fake_host, filter_properties).AndReturn(True)
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
- def test_host_state_passes_filters_passes_with_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'ignore_hosts': ['host2']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(True)
- cls2.host_passes(fake_host, filter_properties).AndReturn(True)
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
- def test_host_state_passes_filters_fails(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- cls1.host_passes(fake_host, filter_properties).AndReturn(False)
- # cls2.host_passes() not called because of short circuit
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertFalse(result)
-
- def test_host_state_passes_filters_fails_from_ignore(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'ignore_hosts': ['host1']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- # cls[12].host_passes() not called because of short circuit
- # with matching host to ignore
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertFalse(result)
-
- def test_host_state_passes_filters_skipped_from_force(self):
- fake_host = host_manager.HostState('host1', 'compute')
- filter_properties = {'force_hosts': ['host1']}
-
- cls1 = ComputeFilterClass1()
- cls2 = ComputeFilterClass2()
- self.mox.StubOutWithMock(cls1, 'host_passes')
- self.mox.StubOutWithMock(cls2, 'host_passes')
- filter_fns = [cls1.host_passes, cls2.host_passes]
-
- # cls[12].host_passes() not called because of short circuit
- # with matching host to force
-
- self.mox.ReplayAll()
- result = fake_host.passes_filters(filter_fns, filter_properties)
- self.assertTrue(result)
-
def test_stat_consumption_from_compute_node(self):
stats = [
dict(key='num_instances', value='5'),
@@ -267,7 +319,7 @@ class HostStateTestCase(test.TestCase):
local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
updated_at=None)
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost", "fakenode")
host.update_from_compute_node(compute)
self.assertEqual(5, host.num_instances)
@@ -282,7 +334,7 @@ class HostStateTestCase(test.TestCase):
self.assertEqual(42, host.num_io_ops)
def test_stat_consumption_from_instance(self):
- host = host_manager.HostState("fakehost", "faketopic")
+ host = host_manager.HostState("fakehost", "fakenode")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index df4e13244..f8ed20b43 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -1,4 +1,4 @@
-# Copyright 2011 OpenStack LLC.
+# Copyright 2011-2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,39 +15,94 @@
"""
Tests For Least Cost functions.
"""
+from nova import config
from nova import context
-from nova.scheduler import host_manager
-from nova.scheduler import least_cost
+from nova.openstack.common import cfg
+from nova.scheduler import weights
+from nova.scheduler.weights import least_cost
from nova import test
from nova.tests.scheduler import fakes
-def offset(hostinfo, options):
+test_least_cost_opts = [
+ cfg.FloatOpt('compute_fake_weigher1_weight',
+ default=2.0,
+ help='How much weight to give the fake_weigher1 function'),
+ cfg.FloatOpt('compute_fake_weigher2_weight',
+ default=1.0,
+ help='How much weight to give the fake_weigher2 function'),
+ ]
+
+CONF = config.CONF
+CONF.import_opt('least_cost_functions', 'nova.scheduler.weights.least_cost')
+CONF.import_opt('compute_fill_first_cost_fn_weight',
+ 'nova.scheduler.weights.least_cost')
+CONF.register_opts(test_least_cost_opts)
+
+
+def compute_fake_weigher1(hostinfo, options):
return hostinfo.free_ram_mb + 10000
-def scale(hostinfo, options):
+def compute_fake_weigher2(hostinfo, options):
return hostinfo.free_ram_mb * 2
class LeastCostTestCase(test.TestCase):
def setUp(self):
super(LeastCostTestCase, self).setUp()
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ weigher_classes = least_cost.get_least_cost_weighers()
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(weigher_classes,
+ hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
fakes.mox_host_manager_db_calls(self.mox, ctxt)
self.mox.ReplayAll()
- host_states = self.host_manager.get_all_host_states(ctxt,
- 'compute').values()
+ host_states = self.host_manager.get_all_host_states(ctxt)
self.mox.VerifyAll()
self.mox.ResetAll()
return host_states
- def test_weighted_sum_happy_day(self):
- fn_tuples = [(1.0, offset), (1.0, scale)]
+ def test_default_of_spread_first(self):
+ # Default modifier is -1.0, so it turns out that hosts with
+ # the most free memory win
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192)
+ self.assertEqual(weighed_host.obj.host, 'host4')
+
+ def test_filling_first(self):
+ self.flags(compute_fill_first_cost_fn_weight=1.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=-512
+ # host2: free_ram_mb=-1024
+ # host3: free_ram_mb=-3072
+ # host4: free_ram_mb=-8192
+
+ # so, host1 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, -512)
+ self.assertEqual(weighed_host.obj.host, 'host1')
+
+ def test_weighted_sum_provided_method(self):
+ fns = ['nova.tests.scheduler.test_least_cost.compute_fake_weigher1',
+ 'nova.tests.scheduler.test_least_cost.compute_fake_weigher2']
+ self.flags(least_cost_functions=fns)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
@@ -59,18 +114,17 @@ class LeastCostTestCase(test.TestCase):
# [10512, 11024, 13072, 18192]
# [1024, 2048, 6144, 16384]
- # adjusted [ 1.0 * x + 1.0 * y] =
- # [11536, 13072, 19216, 34576]
+ # adjusted [ 2.0 * x + 1.0 * y] =
+ # [22048, 24096, 32288, 52768]
# so, host1 should win:
- options = {}
- weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
- options)
- self.assertEqual(weighted_host.weight, 11536)
- self.assertEqual(weighted_host.host_state.host, 'host1')
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 52768)
+ self.assertEqual(weighed_host.obj.host, 'host4')
def test_weighted_sum_single_function(self):
- fn_tuples = [(1.0, offset), ]
+ fns = ['nova.tests.scheduler.test_least_cost.compute_fake_weigher1']
+ self.flags(least_cost_functions=fns)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=0
@@ -80,24 +134,10 @@ class LeastCostTestCase(test.TestCase):
# [offset, ]=
# [10512, 11024, 13072, 18192]
+ # adjusted [ 2.0 * x ]=
+ # [21024, 22048, 26144, 36384]
# so, host1 should win:
- options = {}
- weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
- options)
- self.assertEqual(weighted_host.weight, 10512)
- self.assertEqual(weighted_host.host_state.host, 'host1')
-
-
-class TestWeightedHost(test.TestCase):
- def test_dict_conversion_without_host_state(self):
- host = least_cost.WeightedHost('someweight')
- expected = {'weight': 'someweight'}
- self.assertDictMatch(host.to_dict(), expected)
-
- def test_dict_conversion_with_host_state(self):
- host_state = host_manager.HostState('somehost', 'sometopic')
- host = least_cost.WeightedHost('someweight', host_state)
- expected = {'weight': 'someweight',
- 'host': 'somehost'}
- self.assertDictMatch(host.to_dict(), expected)
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 36384)
+ self.assertEqual(weighed_host.obj.host, 'host4')
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index 8cf741118..05a792a53 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -18,14 +18,13 @@
Unit Tests for nova.scheduler.rpcapi
"""
+from nova import config
from nova import context
-from nova import flags
from nova.openstack.common import rpc
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class SchedulerRpcAPITestCase(test.TestCase):
@@ -51,7 +50,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
- expected_args = [ctxt, FLAGS.scheduler_topic, expected_msg]
+ expected_args = [ctxt, CONF.scheduler_topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 6a0e93b7d..76d438f2e 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -26,21 +26,20 @@ from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova.scheduler import driver
from nova.scheduler import manager
from nova import test
+from nova.tests import matchers
from nova.tests.scheduler import fakes
from nova import utils
-FLAGS = flags.FLAGS
-
class SchedulerManagerTestCase(test.TestCase):
"""Test case for scheduler manager"""
@@ -138,7 +137,7 @@ class SchedulerManagerTestCase(test.TestCase):
'local_gb_used': 512,
'memory_mb': 1024,
'memory_mb_used': 512}}
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
def _mox_schedule_method_helper(self, method_name):
# Make sure the method exists that we're going to test call
@@ -721,7 +720,7 @@ class SchedulerDriverModuleTestCase(test.TestCase):
result = driver.encode_instance(instance, True)
expected = {'id': instance['id'], '_is_precooked': False}
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
@@ -729,6 +728,6 @@ class SchedulerDriverModuleTestCase(test.TestCase):
expected = {}
expected.update(instance)
expected['_is_precooked'] = True
- self.assertDictMatch(result, expected)
+ self.assertThat(result, matchers.DictMatches(expected))
# Orig dict not changed
self.assertNotEqual(result, instance)
diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py
new file mode 100644
index 000000000..8699ed811
--- /dev/null
+++ b/nova/tests/scheduler/test_weights.py
@@ -0,0 +1,117 @@
+# Copyright 2011-2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler weights.
+"""
+
+from nova import context
+from nova.scheduler import weights
+from nova import test
+from nova.tests import matchers
+from nova.tests.scheduler import fakes
+
+
+class TestWeighedHost(test.TestCase):
+ def test_dict_conversion(self):
+ host_state = fakes.FakeHostState('somehost', None, {})
+ host = weights.WeighedHost(host_state, 'someweight')
+ expected = {'weight': 'someweight',
+ 'host': 'somehost'}
+ self.assertThat(host.to_dict(), matchers.DictMatches(expected))
+
+ def test_all_weighers(self):
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('RAMWeigher', class_names)
+
+ def test_all_weighers_with_deprecated_config1(self):
+ self.flags(compute_fill_first_cost_fn_weight=-1.0)
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('_LeastCostWeigher', class_names)
+
+ def test_all_weighers_with_deprecated_config2(self):
+ self.flags(least_cost_functions=['something'])
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(len(classes), 1)
+ self.assertIn('_LeastCostWeigher', class_names)
+
+
+class RamWeigherTestCase(test.TestCase):
+ def setUp(self):
+ super(RamWeigherTestCase, self).setUp()
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.ram.RAMWeigher'])
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ fakes.mox_host_manager_db_calls(self.mox, ctxt)
+ self.mox.ReplayAll()
+ host_states = self.host_manager.get_all_host_states(ctxt)
+ self.mox.VerifyAll()
+ self.mox.ResetAll()
+ return host_states
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192)
+ self.assertEqual(weighed_host.obj.host, 'host4')
+
+ def test_ram_filter_multiplier1(self):
+ self.flags(ram_weight_multiplier=-1.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=-512
+ # host2: free_ram_mb=-1024
+ # host3: free_ram_mb=-3072
+ # host4: free_ram_mb=-8192
+
+ # so, host1 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, -512)
+ self.assertEqual(weighed_host.obj.host, 'host1')
+
+ def test_ram_filter_multiplier2(self):
+ self.flags(ram_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512 * 2
+ # host2: free_ram_mb=1024 * 2
+ # host3: free_ram_mb=3072 * 2
+ # host4: free_ram_mb=8192 * 2
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(weighed_host.weight, 8192 * 2)
+ self.assertEqual(weighed_host.obj.host, 'host4')
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 6e0a97c0c..63630325b 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -36,14 +36,12 @@ from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova import block_device
+from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
-
-
-FLAGS = flags.FLAGS
+from nova.tests import matchers
class FakeHttplibSocket(object):
@@ -163,7 +161,7 @@ class Ec2utilsTestCase(test.TestCase):
'virtual_name': 'ephemeral0'}}}
out_dict = ec2utils.dict_from_dotted_str(in_str)
- self.assertDictMatch(out_dict, expected_dict)
+ self.assertThat(out_dict, matchers.DictMatches(expected_dict))
def test_properties_root_defice_name(self):
mappings = [{"device": "/dev/sda1", "virtual": "root"}]
@@ -209,8 +207,8 @@ class Ec2utilsTestCase(test.TestCase):
'device': '/dev/sdc1'},
{'virtual': 'ephemeral1',
'device': '/dev/sdc1'}]
- self.assertDictListMatch(block_device.mappings_prepend_dev(mappings),
- expected_result)
+ self.assertThat(block_device.mappings_prepend_dev(mappings),
+ matchers.DictListMatches(expected_result))
class ApiEc2TestCase(test.TestCase):
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
index 381ed8070..2d0349534 100644
--- a/nova/tests/test_bdm.py
+++ b/nova/tests/test_bdm.py
@@ -22,6 +22,7 @@ Tests for Block Device Mapping Code.
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova import test
+from nova.tests import matchers
class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
@@ -41,7 +42,7 @@ class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
def _assertApply(self, action, bdm_list):
for bdm, expected_result in bdm_list:
- self.assertDictMatch(action(bdm), expected_result)
+ self.assertThat(action(bdm), matchers.DictMatches(expected_result))
def test_parse_block_device_mapping(self):
self.stubs.Set(ec2utils,
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
index 922dc3613..4973b3712 100644
--- a/nova/tests/test_configdrive2.py
+++ b/nova/tests/test_configdrive2.py
@@ -22,14 +22,11 @@ import tempfile
from nova import test
-from nova import flags
+from nova import config
from nova.openstack.common import log
from nova import utils
from nova.virt import configdrive
-
-FLAGS = flags.FLAGS
-
LOG = log.getLogger(__name__)
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index c725079d2..78fa420aa 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -20,15 +20,13 @@ import os
import mox
+from nova import config
from nova import crypto
from nova import db
from nova import exception
-from nova import flags
from nova import test
from nova import utils
-FLAGS = flags.FLAGS
-
class X509Test(test.TestCase):
def test_can_generate_x509(self):
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 76bd1f031..193378557 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -20,16 +20,20 @@
"""Unit tests for the DB API"""
import datetime
+import uuid as stdlib_uuid
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova import test
-from nova import utils
+from nova.tests import matchers
-FLAGS = flags.FLAGS
+
+CONF = config.CONF
+CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
+CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
class DbApiTestCase(test.TestCase):
@@ -141,7 +145,7 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
- marker=str(utils.gen_uuid()))
+ marker=str(stdlib_uuid.uuid4()))
def test_migration_get_unconfirmed_by_dest_compute(self):
ctxt = context.get_admin_context()
@@ -267,6 +271,26 @@ class DbApiTestCase(test.TestCase):
system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
self.assertEqual('baz', system_meta['original_image_ref'])
+ def test_instance_update_of_instance_type_id(self):
+ ctxt = context.get_admin_context()
+
+ inst_type1 = db.instance_type_get_by_name(ctxt, 'm1.tiny')
+ inst_type2 = db.instance_type_get_by_name(ctxt, 'm1.small')
+
+ values = {'instance_type_id': inst_type1['id']}
+ instance = db.instance_create(ctxt, values)
+
+ self.assertEqual(instance['instance_type']['id'], inst_type1['id'])
+ self.assertEqual(instance['instance_type']['name'],
+ inst_type1['name'])
+
+ values = {'instance_type_id': inst_type2['id']}
+ instance = db.instance_update(ctxt, instance['uuid'], values)
+
+ self.assertEqual(instance['instance_type']['id'], inst_type2['id'])
+ self.assertEqual(instance['instance_type']['name'],
+ inst_type2['name'])
+
def test_instance_update_with_and_get_original(self):
ctxt = context.get_admin_context()
@@ -279,10 +303,45 @@ class DbApiTestCase(test.TestCase):
self.assertEquals("building", old_ref["vm_state"])
self.assertEquals("needscoffee", new_ref["vm_state"])
+ def test_instance_update_with_extra_specs(self):
+ """Ensure _extra_specs are returned from _instance_update"""
+ ctxt = context.get_admin_context()
+
+ # create a flavor
+ inst_type_dict = dict(
+ name="test_flavor",
+ memory_mb=1,
+ vcpus=1,
+ root_gb=1,
+ ephemeral_gb=1,
+ flavorid=105)
+ inst_type_ref = db.instance_type_create(ctxt, inst_type_dict)
+
+ # add some extra spec to our flavor
+ spec = {'test_spec': 'foo'}
+ db.instance_type_extra_specs_update_or_create(
+ ctxt,
+ inst_type_ref['flavorid'],
+ spec)
+
+ # create instance, just populates db, doesn't pull extra_spec
+ instance = db.instance_create(
+ ctxt,
+ {'instance_type_id': inst_type_ref['id']})
+ self.assertNotIn('extra_specs', instance)
+
+ # update instance, used when starting instance to set state, etc
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ ctxt,
+ instance['uuid'],
+ {})
+ self.assertEquals(spec, old_ref['extra_specs'])
+ self.assertEquals(spec, new_ref['extra_specs'])
+
def test_instance_fault_create(self):
"""Ensure we can create an instance fault"""
ctxt = context.get_admin_context()
- uuid = str(utils.gen_uuid())
+ uuid = str(stdlib_uuid.uuid4())
# Create a fault
fault_values = {
@@ -377,6 +436,10 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(project, self.project_id)
self.assertEqual(scope, 'public')
+ expected = [domain1, domain2]
+ domains = db.dnsdomain_list(ctxt)
+ self.assertEqual(expected, domains)
+
db.dnsdomain_unregister(ctxt, domain1)
db.dnsdomain_unregister(ctxt, domain2)
@@ -614,14 +677,16 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
"""Ensure aggregate metadata is deleted bug 1052479."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
result = _create_aggregate(metadata=None)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
@@ -743,7 +808,8 @@ class AggregateDBApiTestCase(test.TestCase):
values['metadata'] = _get_fake_aggr_metadata()
db.aggregate_update(ctxt, 1, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(_get_fake_aggr_metadata(), expected)
+ self.assertThat(_get_fake_aggr_metadata(),
+ matchers.DictMatches(expected))
def test_aggregate_update_with_existing_metadata(self):
"""Ensure an aggregate can be updated with existing metadata."""
@@ -754,7 +820,7 @@ class AggregateDBApiTestCase(test.TestCase):
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, 1, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(values['metadata'], expected)
+ self.assertThat(values['metadata'], matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
"""Ensure AggregateNotFound is raised when updating an aggregate."""
@@ -800,7 +866,7 @@ class AggregateDBApiTestCase(test.TestCase):
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result.id, metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
- self.assertDictMatch(metadata, expected)
+ self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_update(self):
"""Ensure we can update metadata for the aggregate."""
@@ -813,7 +879,7 @@ class AggregateDBApiTestCase(test.TestCase):
db.aggregate_metadata_add(ctxt, result.id, new_metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
metadata[key] = 'foo'
- self.assertDictMatch(metadata, expected)
+ self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
"""Ensure we can delete metadata for the aggregate."""
@@ -824,7 +890,7 @@ class AggregateDBApiTestCase(test.TestCase):
db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result.id)
del metadata[metadata.keys()[0]]
- self.assertDictMatch(metadata, expected)
+ self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
"""Ensure AggregateMetadataNotFound is raised when deleting."""
@@ -1002,6 +1068,55 @@ class CapacityTestCase(test.TestCase):
self.assertEqual(1, int(stat['value']))
+class MigrationTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MigrationTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ self._create()
+ self._create()
+ self._create(status='reverted')
+ self._create(status='confirmed')
+ self._create(source_compute='host2', dest_compute='host1')
+ self._create(source_compute='host2', dest_compute='host3')
+ self._create(source_compute='host3', dest_compute='host4')
+
+ def _create(self, status='migrating', source_compute='host1',
+ dest_compute='host2'):
+
+ values = {'host': source_compute}
+ instance = db.instance_create(self.ctxt, values)
+
+ values = {'status': status, 'source_compute': source_compute,
+ 'dest_compute': dest_compute,
+ 'instance_uuid': instance['uuid']}
+ db.migration_create(self.ctxt, values)
+
+ def _assert_in_progress(self, migrations):
+ for migration in migrations:
+ self.assertNotEqual('confirmed', migration.status)
+ self.assertNotEqual('reverted', migration.status)
+
+ def test_in_progress_host1(self):
+ migrations = db.migration_get_in_progress_by_host(self.ctxt, 'host1')
+ # 2 as source + 1 as dest
+ self.assertEqual(3, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_in_progress_host2(self):
+ migrations = db.migration_get_in_progress_by_host(self.ctxt, 'host2')
+ # 2 as dest, 2 as source
+ self.assertEqual(4, len(migrations))
+ self._assert_in_progress(migrations)
+
+ def test_instance_join(self):
+ migrations = db.migration_get_in_progress_by_host(self.ctxt, 'host2')
+ for migration in migrations:
+ instance = migration['instance']
+ self.assertEqual(migration['instance_uuid'], instance['uuid'])
+
+
class TestIpAllocation(test.TestCase):
def setUp(self):
@@ -1077,187 +1192,3 @@ class InstanceDestroyConstraints(test.TestCase):
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
-
-
-def _get_sm_backend_params():
- config_params = ("name_label=testsmbackend "
- "server=localhost "
- "serverpath=/tmp/nfspath")
- params = dict(flavor_id=1,
- sr_uuid=None,
- sr_type='nfs',
- config_params=config_params)
- return params
-
-
-def _get_sm_flavor_params():
- params = dict(label="gold",
- description="automatic backups")
- return params
-
-
-class SMVolumeDBApiTestCase(test.TestCase):
- def setUp(self):
- super(SMVolumeDBApiTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def test_sm_backend_conf_create(self):
- params = _get_sm_backend_params()
- ctxt = context.get_admin_context()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- self.assertIsInstance(beconf['id'], int)
-
- def test_sm_backend_conf_create_raise_duplicate(self):
- params = _get_sm_backend_params()
- ctxt = context.get_admin_context()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- self.assertIsInstance(beconf['id'], int)
- self.assertRaises(exception.Duplicate,
- db.sm_backend_conf_create,
- ctxt,
- params)
-
- def test_sm_backend_conf_update(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- beconf = db.sm_backend_conf_update(ctxt,
- beconf['id'],
- dict(sr_uuid="FA15E-1D"))
- self.assertEqual(beconf['sr_uuid'], "FA15E-1D")
-
- def test_sm_backend_conf_update_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_update,
- ctxt,
- 7,
- dict(sr_uuid="FA15E-1D"))
-
- def test_sm_backend_conf_get(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- val = db.sm_backend_conf_get(ctxt, beconf['id'])
- self.assertDictMatch(dict(val), dict(beconf))
-
- def test_sm_backend_conf_get_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get,
- ctxt,
- 7)
-
- def test_sm_backend_conf_get_by_sr(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- val = db.sm_backend_conf_get_by_sr(ctxt, beconf['sr_uuid'])
- self.assertDictMatch(dict(val), dict(beconf))
-
- def test_sm_backend_conf_get_by_sr_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get_by_sr,
- ctxt,
- "FA15E-1D")
-
- def test_sm_backend_conf_delete(self):
- ctxt = context.get_admin_context()
- params = _get_sm_backend_params()
- beconf = db.sm_backend_conf_create(ctxt,
- params)
- db.sm_backend_conf_delete(ctxt, beconf['id'])
- self.assertRaises(exception.NotFound,
- db.sm_backend_conf_get,
- ctxt,
- beconf['id'])
-
- def test_sm_backend_conf_delete_nonexisting(self):
- ctxt = context.get_admin_context()
- db.sm_backend_conf_delete(ctxt, "FA15E-1D")
-
- def test_sm_flavor_create(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- self.assertIsInstance(flav['id'], int)
-
- def sm_flavor_create_raise_duplicate(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- self.assertRaises(exception.Duplicate,
- db.sm_flavor_create,
- params)
-
- def test_sm_flavor_update(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- newparms = dict(description="basic volumes")
- flav = db.sm_flavor_update(ctxt, flav['id'], newparms)
- self.assertEqual(flav['description'], "basic volumes")
-
- def test_sm_flavor_update_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_update,
- ctxt,
- 7,
- dict(description="fakedesc"))
-
- def test_sm_flavor_delete(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- db.sm_flavor_delete(ctxt, flav['id'])
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- "gold")
-
- def test_sm_flavor_delete_nonexisting(self):
- ctxt = context.get_admin_context()
- db.sm_flavor_delete(ctxt, 7)
-
- def test_sm_flavor_get(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- val = db.sm_flavor_get(ctxt, flav['id'])
- self.assertDictMatch(dict(val), dict(flav))
-
- def test_sm_flavor_get_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- 7)
-
- def test_sm_flavor_get_by_label(self):
- ctxt = context.get_admin_context()
- params = _get_sm_flavor_params()
- flav = db.sm_flavor_create(ctxt,
- params)
- val = db.sm_flavor_get_by_label(ctxt, flav['label'])
- self.assertDictMatch(dict(val), dict(flav))
-
- def test_sm_flavor_get_by_label_raise_notfound(self):
- ctxt = context.get_admin_context()
- self.assertRaises(exception.NotFound,
- db.sm_flavor_get,
- ctxt,
- "fake")
diff --git a/nova/tests/test_filters.py b/nova/tests/test_filters.py
new file mode 100644
index 000000000..546b13180
--- /dev/null
+++ b/nova/tests/test_filters.py
@@ -0,0 +1,125 @@
+# Copyright 2012 OpenStack LLC. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import inspect
+import sys
+
+from nova import filters
+from nova import loadables
+from nova import test
+
+
+class Filter1(filters.BaseFilter):
+ """Test Filter class #1."""
+ pass
+
+
+class Filter2(filters.BaseFilter):
+ """Test Filter class #2."""
+ pass
+
+
+class FiltersTestCase(test.TestCase):
+ def test_filter_all(self):
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+
+ self.mox.ReplayAll()
+
+ result = base_filter.filter_all(filter_obj_list, filter_properties)
+ self.assertTrue(inspect.isgenerator(result))
+ self.assertEqual(list(result), ['obj1', 'obj3'])
+
+ def test_filter_all_recursive_yields(self):
+ """Test filter_all() allows generators from previous filter_all()s."""
+ # filter_all() yields results. We want to make sure that we can
+ # call filter_all() with generators returned from previous calls
+ # to filter_all().
+ filter_obj_list = ['obj1', 'obj2', 'obj3']
+ filter_properties = 'fake_filter_properties'
+ base_filter = filters.BaseFilter()
+
+ self.mox.StubOutWithMock(base_filter, '_filter_one')
+
+ total_iterations = 200
+
+        # The order in which _filter_one is going to get called gets
+        # confusing because we will be recursively yielding things.
+        # We are going to simulate the first call to filter_all()
+ # returning False for 'obj2'. So, 'obj1' will get yielded
+ # 'total_iterations' number of times before the first filter_all()
+ # call gets to processing 'obj2'. We then return 'False' for it.
+ # After that, 'obj3' gets yielded 'total_iterations' number of
+ # times.
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj1', filter_properties).AndReturn(True)
+ base_filter._filter_one('obj2', filter_properties).AndReturn(False)
+ for x in xrange(total_iterations):
+ base_filter._filter_one('obj3', filter_properties).AndReturn(True)
+ self.mox.ReplayAll()
+
+ objs = iter(filter_obj_list)
+ for x in xrange(total_iterations):
+ # Pass in generators returned from previous calls.
+ objs = base_filter.filter_all(objs, filter_properties)
+ self.assertTrue(inspect.isgenerator(objs))
+ self.assertEqual(list(objs), ['obj1', 'obj3'])
+
+ def test_get_filtered_objects(self):
+ filter_objs_initial = ['initial', 'filter1', 'objects1']
+ filter_objs_second = ['second', 'filter2', 'objects2']
+ filter_objs_last = ['last', 'filter3', 'objects3']
+ filter_properties = 'fake_filter_properties'
+
+ def _fake_base_loader_init(*args, **kwargs):
+ pass
+
+ self.stubs.Set(loadables.BaseLoader, '__init__',
+ _fake_base_loader_init)
+
+ filt1_mock = self.mox.CreateMock(Filter1)
+ filt2_mock = self.mox.CreateMock(Filter2)
+
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt1_mock, 'filter_all')
+ self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+
+ Filter1().AndReturn(filt1_mock)
+ filt1_mock.filter_all(filter_objs_initial,
+ filter_properties).AndReturn(filter_objs_second)
+ Filter2().AndReturn(filt2_mock)
+ filt2_mock.filter_all(filter_objs_second,
+ filter_properties).AndReturn(filter_objs_last)
+
+ self.mox.ReplayAll()
+
+ filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
+ filter_classes = [Filter1, Filter2]
+ result = filter_handler.get_filtered_objects(filter_classes,
+ filter_objs_initial,
+ filter_properties)
+ self.assertEqual(result, filter_objs_last)
diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py
index ad94f6550..b832c95a4 100644
--- a/nova/tests/test_flags.py
+++ b/nova/tests/test_flags.py
@@ -18,13 +18,11 @@
# under the License.
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova import test
CONF = config.CONF
-FLAGS = flags.FLAGS
-FLAGS.register_opt(cfg.StrOpt('flags_unittest',
+CONF.register_opt(cfg.StrOpt('flags_unittest',
default='foo',
help='for testing purposes only'))
@@ -43,7 +41,7 @@ class FlagsTestCase(test.TestCase):
self.assertEqual(CONF.answer, 256)
def test_getopt_non_interspersed_args(self):
- self.assert_('runtime_answer' not in FLAGS)
+ self.assert_('runtime_answer' not in CONF)
argv = ['flags_test', 'extra_arg', '--runtime_answer=60']
args = config.parse_args(argv, default_config_files=[])
@@ -51,42 +49,42 @@ class FlagsTestCase(test.TestCase):
self.assertEqual(argv, args)
def test_runtime_and_unknown_flags(self):
- self.assert_('runtime_answer' not in FLAGS)
+ self.assert_('runtime_answer' not in CONF)
import nova.tests.runtime_flags
- self.assert_('runtime_answer' in FLAGS)
- self.assertEqual(FLAGS.runtime_answer, 54)
+ self.assert_('runtime_answer' in CONF)
+ self.assertEqual(CONF.runtime_answer, 54)
def test_long_vs_short_flags(self):
- FLAGS.clear()
- FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
+ CONF.clear()
+ CONF.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
default='val',
help='desc'))
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
args = config.parse_args(argv, default_config_files=[])
- self.assert_('duplicate_answer' not in FLAGS)
- self.assert_(FLAGS.duplicate_answer_long, 60)
+ self.assert_('duplicate_answer' not in CONF)
+ self.assert_(CONF.duplicate_answer_long, 60)
- FLAGS.clear()
- FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
+ CONF.clear()
+ CONF.register_cli_opt(cfg.IntOpt('duplicate_answer',
default=60, help='desc'))
args = config.parse_args(argv, default_config_files=[])
- self.assertEqual(FLAGS.duplicate_answer, 60)
- self.assertEqual(FLAGS.duplicate_answer_long, 'val')
+ self.assertEqual(CONF.duplicate_answer, 60)
+ self.assertEqual(CONF.duplicate_answer_long, 'val')
def test_flag_leak_left(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
+ self.assertEqual(CONF.flags_unittest, 'foo')
self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
+ self.assertEqual(CONF.flags_unittest, 'bar')
def test_flag_leak_right(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
+ self.assertEqual(CONF.flags_unittest, 'foo')
self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
+ self.assertEqual(CONF.flags_unittest, 'bar')
def test_flag_overrides(self):
- self.assertEqual(FLAGS.flags_unittest, 'foo')
+ self.assertEqual(CONF.flags_unittest, 'foo')
self.flags(flags_unittest='bar')
- self.assertEqual(FLAGS.flags_unittest, 'bar')
- FLAGS.reset()
- self.assertEqual(FLAGS.flags_unittest, 'foo')
+ self.assertEqual(CONF.flags_unittest, 'bar')
+ CONF.reset()
+ self.assertEqual(CONF.flags_unittest, 'foo')
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 6d2396350..dab5d027d 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -18,6 +18,7 @@
Test suite for the Hyper-V driver and related APIs.
"""
+import json
import os
import platform
import shutil
@@ -25,9 +26,9 @@ import sys
import uuid
from nova.compute import power_state
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.image import glance
from nova.tests import fake_network
from nova.tests.hyperv import basetestcase
@@ -40,6 +41,8 @@ from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import vmutils
from nova.virt import images
+CONF = config.CONF
+
class HyperVAPITestCase(basetestcase.BaseTestCase):
"""Unit tests for Hyper-V driver calls."""
@@ -56,8 +59,8 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._update_image_raise_exception = False
self._post_method_called = False
self._recover_method_called = False
- self._volume_target_portal = '192.168.1.112:3260'
- self._volume_id = '10958016-e196-42e3-9e7f-5d8927ae3099'
+ self._volume_target_portal = 'testtargetportal:3260'
+ self._volume_id = 'd3f99512-af51-4a75-aee1-79875e016159'
self._context = context.RequestContext(self._user_id, self._project_id)
self._setup_stubs()
@@ -71,6 +74,11 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def fake_dumps(msg):
+ return '""'
+ self.stubs.Set(json, 'dumps', fake_dumps)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
@@ -98,7 +106,9 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
'time',
'subprocess',
'multiprocessing',
- '_winreg'
+ '_winreg',
+ 'nova.virt.configdrive',
+ 'nova.utils'
]
# Modules in which the mocks are going to be injected
@@ -121,6 +131,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
snapshotops,
livemigrationops,
hypervutils,
+ db_fakes,
sys.modules[__name__]
]
@@ -148,14 +159,14 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._hypervutils.logout_iscsi_volume_sessions(self._volume_id)
- shutil.rmtree(flags.FLAGS.instances_path, True)
+ shutil.rmtree(CONF.instances_path, True)
fake_image.FakeImageService_reset()
finally:
super(HyperVAPITestCase, self).tearDown()
def test_get_available_resource(self):
- dic = self._conn.get_available_resource()
+ dic = self._conn.get_available_resource(None)
self.assertEquals(dic['hypervisor_hostname'], platform.node())
@@ -185,6 +196,39 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
+ def test_spawn_config_drive(self):
+ self.flags(force_config_drive=True)
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ self._spawn_instance(True)
+
+ (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 0)
+ self.assertEquals(len(vhd_paths), 2)
+
+ def test_spawn_config_drive_cdrom(self):
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=True)
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ self._spawn_instance(True)
+
+ (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 1)
+ self.assertEquals(len(vhd_paths), 1)
+ self.assertTrue(os.path.exists(dvd_paths[0]))
+
+ def test_spawn_no_config_drive(self):
+ self.flags(force_config_drive=False)
+
+ self._spawn_instance(True)
+
+ (_, _, dvd_paths) = self._hypervutils.get_vm_disks(
+ self._instance_data["name"])
+ self.assertEquals(len(dvd_paths), 0)
+
def test_spawn_no_vswitch_exception(self):
# Set flag to a non existing vswitch
self.flags(vswitch_name=str(uuid.uuid4()))
@@ -269,7 +313,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_destroy(self):
self._spawn_instance(True)
- (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self._conn.destroy(self._instance_data)
@@ -286,7 +330,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._spawn_instance(False)
# Existing server
- self._dest_server = "HV12RCTest1"
+ self._dest_server = "HV12OSDEMO2"
self._live_migration(self._dest_server)
@@ -408,7 +452,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
- (vhd_paths, _) = self._hypervutils.get_vm_disks(
+ (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(vhd_paths), 1)
@@ -431,7 +475,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_attach_volume(self):
self._attach_volume()
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
@@ -447,7 +491,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._conn.detach_volume(connection_info,
self._instance_data["name"], '/dev/sdc')
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 0)
@@ -461,7 +505,7 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._spawn_instance(False, block_device_info)
- (_, volumes_paths) = self._hypervutils.get_vm_disks(
+ (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
self._instance_data["name"])
self.assertEquals(len(volumes_paths), 1)
diff --git a/nova/tests/test_image_utils.py b/nova/tests/test_image_utils.py
index fac0422bf..9c040f2e1 100644
--- a/nova/tests/test_image_utils.py
+++ b/nova/tests/test_image_utils.py
@@ -18,9 +18,92 @@ from nova import test
from nova import utils
from nova.virt import images
+from nova.virt.libvirt import utils as libvirt_utils
class ImageUtilsTestCase(test.TestCase):
+ def test_disk_type(self):
+ # Seems like lvm detection
+ # if its in /dev ??
+ for p in ['/dev/b', '/dev/blah/blah']:
+ d_type = libvirt_utils.get_disk_type(p)
+ self.assertEquals('lvm', d_type)
+ # Try the other types
+ template_output = """image: %(path)s
+file format: %(format)s
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ path = '/myhome/disk.config'
+ for f in ['raw', 'qcow2']:
+ output = template_output % ({
+ 'format': f,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_type = libvirt_utils.get_disk_type(path)
+ self.assertEquals(f, d_type)
+ self.mox.UnsetStubs()
+
+ def test_disk_backing(self):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: 2K (2048 bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ output = template_output % ({
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_backing = libvirt_utils.get_disk_backing_file(path)
+ self.assertEquals(None, d_backing)
+
+ def test_disk_size(self):
+ path = '/myhome/disk.config'
+ template_output = """image: %(path)s
+file format: raw
+virtual size: %(v_size)s (%(vsize_b)s bytes)
+cluster_size: 65536
+disk size: 96K
+"""
+ for i in range(0, 128):
+ bytes = i * 65336
+ kbytes = bytes / 1024
+ mbytes = kbytes / 1024
+ output = template_output % ({
+ 'v_size': "%sM" % (mbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEquals(i, d_size)
+ self.mox.UnsetStubs()
+ output = template_output % ({
+ 'v_size': "%sK" % (kbytes),
+ 'vsize_b': i,
+ 'path': path,
+ })
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((output, ''))
+ self.mox.ReplayAll()
+ d_size = libvirt_utils.get_disk_size(path)
+ self.assertEquals(i, d_size)
+ self.mox.UnsetStubs()
+
def test_qemu_info_canon(self):
path = "disk.config"
example_output = """image: disk.config
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 088cb0a82..224127f54 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -17,13 +17,13 @@
import os
-from nova import flags
+from nova import config
from nova.openstack.common import fileutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
-FLAGS = flags.FLAGS
+CONF = config.CONF
class _ImageTestCase(test.TestCase):
@@ -42,9 +42,9 @@ class _ImageTestCase(test.TestCase):
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
- self.PATH = os.path.join(FLAGS.instances_path, self.INSTANCE,
+ self.PATH = os.path.join(CONF.instances_path, self.INSTANCE,
self.NAME)
- self.TEMPLATE_DIR = os.path.join(FLAGS.instances_path,
+ self.TEMPLATE_DIR = os.path.join(CONF.instances_path,
'_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 8203277ae..3a1b40c4f 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -27,16 +27,15 @@ from nova import test
from nova.compute import manager as compute_manager
from nova.compute import vm_states
+from nova import config
from nova import db
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import log
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as virtutils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = log.getLogger(__name__)
@@ -147,13 +146,13 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_list_running_instances(self):
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '2',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-2',
'uuid': '456',
'vm_state': '',
@@ -182,7 +181,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def test_list_resizing_instances(self):
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'inst-1',
'uuid': '123',
'vm_state': vm_states.RESIZED,
@@ -209,7 +208,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
@@ -231,7 +230,7 @@ class ImageCacheManagerTestCase(test.TestCase):
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
@@ -252,7 +251,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.stubs.Set(virtutils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
- found = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name,
+ found = os.path.join(CONF.instances_path, CONF.base_dir_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
@@ -789,13 +788,13 @@ class ImageCacheManagerTestCase(test.TestCase):
# Fake the database call which lists running instances
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
@@ -868,11 +867,11 @@ class ImageCacheManagerTestCase(test.TestCase):
self.flags(instances_path='/tmp/no/such/dir/name/please')
self.flags(image_info_filename_pattern=('$instances_path/_base/'
'%(image)s.info'))
- base_filename = os.path.join(FLAGS.instances_path, '_base', hashed)
+ base_filename = os.path.join(CONF.instances_path, '_base', hashed)
self.assertFalse(virtutils.is_valid_info_file('banana'))
self.assertFalse(virtutils.is_valid_info_file(
- os.path.join(FLAGS.instances_path, '_base', '00000001')))
+ os.path.join(CONF.instances_path, '_base', '00000001')))
self.assertFalse(virtutils.is_valid_info_file(base_filename))
self.assertFalse(virtutils.is_valid_info_file(base_filename + '.sha1'))
self.assertTrue(virtutils.is_valid_info_file(base_filename + '.info'))
@@ -889,13 +888,13 @@ class ImageCacheManagerTestCase(test.TestCase):
# Fake the database call which lists running instances
all_instances = [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
@@ -927,19 +926,19 @@ class ImageCacheManagerTestCase(test.TestCase):
def fake_get_all(context):
was['called'] = True
return [{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
- 'host': FLAGS.host,
+ 'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
self.stubs.Set(db, 'instance_get_all', fake_get_all)
- compute = importutils.import_object(FLAGS.compute_manager)
+ compute = importutils.import_object(CONF.compute_manager)
compute._run_image_cache_manager_pass(None)
self.assertTrue(was['called'])
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 43d698374..deb165682 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -18,16 +18,15 @@ Unit Tests for instance types code
import time
from nova.compute import instance_types
+from nova import config
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import session as sql_session
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova import test
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index ea35ff29e..b2bc88780 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -33,10 +33,10 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_mode
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -45,13 +45,14 @@ from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
import nova.tests.image.fake
+from nova.tests import matchers
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
@@ -59,7 +60,6 @@ from nova.virt.libvirt import snapshots
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume
from nova.virt.libvirt import volume_nfs
-from nova.volume import driver as volume_driver
try:
@@ -69,7 +69,7 @@ except ImportError:
libvirt_driver.libvirt = libvirt
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
_fake_network_info = fake_network.fake_get_instance_nw_info
@@ -172,19 +172,26 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./serial').text, 'fake_serial')
+ def iscsi_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
def test_libvirt_iscsi_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -192,7 +199,6 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -212,18 +218,14 @@ class LibvirtVolumeTestCase(test.TestCase):
def test_libvirt_iscsi_driver_still_in_use(self):
# NOTE(vish) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
location = '10.0.2.15:3260'
name = 'volume-00000001'
iqn = 'iqn.2010-10.org.openstack:%s' % name
devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)]
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -231,7 +233,6 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -241,12 +242,19 @@ class LibvirtVolumeTestCase(test.TestCase):
'-n', 'node.startup', '-v', 'automatic')]
self.assertEqual(self.executes, expected_commands)
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
def test_libvirt_sheepdog_driver(self):
- vol_driver = volume_driver.SheepdogDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.sheepdog_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -254,31 +262,39 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), name)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.rbd_secret_uuid is not None,
+ 'auth_username': CONF.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.rbd_secret_uuid,
+ }
+ }
def test_libvirt_rbd_driver(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./source/auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_enabled(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -292,20 +308,18 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -324,20 +338,18 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_disabled(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -351,18 +363,16 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth'), None)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
- vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ connection_info = self.rbd_connection(vol)
uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
user = 'foo'
secret_type = 'ceph'
@@ -383,26 +393,21 @@ class LibvirtVolumeTestCase(test.TestCase):
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
+ rbd_name = '%s/%s' % ('rbd', name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
self.assertEqual(tree.find('./auth').get('username'), flags_user)
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_lxc_volume(self):
self.stubs.Set(os.path, 'exists', lambda x: True)
- vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- location = '10.0.2.15:3260'
name = 'volume-00000001'
+ location = '10.0.2.15:3260'
iqn = 'iqn.2010-10.org.openstack:%s' % name
- vol = {'id': 1,
- 'name': name,
- 'provider_auth': None,
- 'provider_location': '%s,fake %s' % (location, iqn)}
- connection_info = vol_driver.initialize_connection(vol, self.connr)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info, mount_device)
tree = conf.format_dom()
@@ -410,7 +415,6 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
- connection_info = vol_driver.terminate_connection(vol, self.connr)
def test_libvirt_nfs_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
@@ -448,11 +452,11 @@ class CacheConcurrencyTestCase(test.TestCase):
# which can cause race conditions with the multiple threads we
# use for tests. So, create the path here so utils.synchronized()
# won't delete it out from under one of the threads.
- self.lock_path = os.path.join(FLAGS.instances_path, 'locks')
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
fileutils.ensure_tree(self.lock_path)
def fake_exists(fname):
- basedir = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ basedir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if fname == basedir or fname == self.lock_path:
return True
return False
@@ -630,7 +634,7 @@ class LibvirtConnTestCase(test.TestCase):
'id': 'fake'
}
result = conn.get_volume_connector(volume)
- self.assertDictMatch(expected, result)
+ self.assertThat(expected, matchers.DictMatches(result))
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -647,28 +651,28 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 7)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[4]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
- config.LibvirtConfigGuestInput)
+ vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[6]),
- config.LibvirtConfigGuestGraphics)
+ vconfig.LibvirtConfigGuestGraphics)
self.assertEquals(type(cfg.clock),
- config.LibvirtConfigGuestClock)
+ vconfig.LibvirtConfigGuestClock)
self.assertEquals(cfg.clock.offset, "utc")
self.assertEquals(len(cfg.clock.timers), 2)
self.assertEquals(type(cfg.clock.timers[0]),
- config.LibvirtConfigGuestTimer)
+ vconfig.LibvirtConfigGuestTimer)
self.assertEquals(type(cfg.clock.timers[1]),
- config.LibvirtConfigGuestTimer)
+ vconfig.LibvirtConfigGuestTimer)
self.assertEquals(cfg.clock.timers[0].name, "pit")
self.assertEquals(cfg.clock.timers[0].tickpolicy,
"delay")
@@ -691,21 +695,21 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, None)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestInterface)
+ vconfig.LibvirtConfigGuestInterface)
self.assertEquals(type(cfg.devices[4]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[5]),
- config.LibvirtConfigGuestSerial)
+ vconfig.LibvirtConfigGuestSerial)
self.assertEquals(type(cfg.devices[6]),
- config.LibvirtConfigGuestInput)
+ vconfig.LibvirtConfigGuestInput)
self.assertEquals(type(cfg.devices[7]),
- config.LibvirtConfigGuestGraphics)
+ vconfig.LibvirtConfigGuestGraphics)
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
@@ -722,11 +726,11 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(cfg.os_root, 'dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[1]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestConsole)
+ vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_with_block_device(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -739,10 +743,10 @@ class LibvirtConnTestCase(test.TestCase):
cfg = conn.get_guest_config(instance_ref, [], None, None, info)
self.assertEquals(type(cfg.devices[2]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
self.assertEquals(type(cfg.devices[3]),
- config.LibvirtConfigGuestDisk)
+ vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
def test_get_guest_cpu_config_none(self):
@@ -772,7 +776,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
@@ -815,7 +819,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
self.assertEquals(conf.cpu.model, None)
@@ -834,7 +838,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
self.assertEquals(conf.cpu.model, None)
@@ -854,7 +858,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
self.assertEquals(conf.cpu.model, "Penryn")
@@ -880,12 +884,12 @@ class LibvirtConnTestCase(test.TestCase):
# Ensure we have a predictable host CPU
def get_host_capabilities_stub(self):
- cpu = config.LibvirtConfigGuestCPU()
+ cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
- caps = config.LibvirtConfigCaps()
- caps.host = config.LibvirtConfigCapsHost()
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
@@ -903,7 +907,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Opteron_G4")
self.assertEquals(conf.cpu.vendor, "AMD")
@@ -924,7 +928,7 @@ class LibvirtConnTestCase(test.TestCase):
_fake_network_info(self.stubs, 1),
None, None)
self.assertEquals(type(conf.cpu),
- config.LibvirtConfigGuestCPU)
+ vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
self.assertEquals(conf.cpu.model, "Penryn")
@@ -1838,9 +1842,9 @@ class LibvirtConnTestCase(test.TestCase):
# This test is supposed to make sure we don't
# override a specifically set uri
#
- # Deliberately not just assigning this string to FLAGS.libvirt_uri and
+ # Deliberately not just assigning this string to CONF.libvirt_uri and
# checking against that later on. This way we make sure the
- # implementation doesn't fiddle around with the FLAGS.
+ # implementation doesn't fiddle around with the CONF.
testuri = 'something completely different'
self.flags(libvirt_uri=testuri)
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
@@ -1923,11 +1927,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, True)
- self.assertDictMatch(return_value,
- {"filename": "file",
- 'disk_available_mb': 409600,
- "disk_over_commit": False,
- "block_migration": True})
+ self.assertThat({"filename": "file",
+ 'disk_available_mb': 409600,
+ "disk_over_commit": False,
+ "block_migration": True},
+ matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1949,11 +1953,11 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
return_value = conn.check_can_live_migrate_destination(self.context,
instance_ref, compute_info, compute_info, False)
- self.assertDictMatch(return_value,
- {"filename": "file",
- "block_migration": False,
- "disk_over_commit": False,
- "disk_available_mb": None})
+ self.assertThat({"filename": "file",
+ "block_migration": False,
+ "disk_over_commit": False,
+ "disk_available_mb": None},
+ matchers.DictMatches(return_value))
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -2064,25 +2068,19 @@ class LibvirtConnTestCase(test.TestCase):
def test_live_migration_raises_exception(self):
"""Confirms recover method is called when exceptions are raised."""
# Preparing data
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE}
instance_ref = db.instance_create(self.context, self.test_instance)
instance_ref = db.instance_update(self.context, instance_ref['uuid'],
instance_dict)
- vol_dict = {'status': 'migrating', 'size': 1}
- volume_ref = db.volume_create(self.context, vol_dict)
- db.volume_attached(self.context,
- volume_ref['id'],
- instance_ref['uuid'],
- '/dev/fake')
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
- _bandwidth = FLAGS.live_migration_bandwidth
- vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ _bandwidth = CONF.live_migration_bandwidth
+ vdmock.migrateToURI(CONF.live_migration_uri % 'dest',
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(libvirt.libvirtError('ERR'))
@@ -2107,10 +2105,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE)
self.assertTrue(instance_ref['power_state'] == power_state.RUNNING)
- volume_ref = db.volume_get(self.context, volume_ref['id'])
- self.assertTrue(volume_ref['status'] == 'in-use')
- db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['uuid'])
def test_pre_live_migration_works_correctly_mocked(self):
@@ -2296,14 +2291,14 @@ class LibvirtConnTestCase(test.TestCase):
conn.spawn(self.context, instance, None, [], 'herp',
network_info=network_info)
- path = os.path.join(FLAGS.instances_path, instance.name)
+ path = os.path.join(CONF.instances_path, instance.name)
if os.path.isdir(path):
shutil.rmtree(path)
- path = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ path = os.path.join(CONF.instances_path, CONF.base_dir_name)
if os.path.isdir(path):
- shutil.rmtree(os.path.join(FLAGS.instances_path,
- FLAGS.base_dir_name))
+ shutil.rmtree(os.path.join(CONF.instances_path,
+ CONF.base_dir_name))
def test_get_console_output_file(self):
@@ -2393,7 +2388,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_get_host_ip_addr(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ip = conn.get_host_ip_addr()
- self.assertEquals(ip, FLAGS.my_ip)
+ self.assertEquals(ip, CONF.my_ip)
def test_broken_connection(self):
for (error, domain) in (
@@ -2583,14 +2578,14 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
result = conn.get_disk_available_least()
- space = fake_libvirt_utils.get_fs_info(FLAGS.instances_path)['free']
+ space = fake_libvirt_utils.get_fs_info(CONF.instances_path)['free']
self.assertEqual(result, space / 1024 ** 3)
def test_cpu_info(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
- cpu = config.LibvirtConfigCPU()
+ cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = "x86_64"
@@ -2599,20 +2594,20 @@ class LibvirtConnTestCase(test.TestCase):
cpu.threads = 1
cpu.sockets = 4
- cpu.add_feature(config.LibvirtConfigCPUFeature("extapic"))
- cpu.add_feature(config.LibvirtConfigCPUFeature("3dnow"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
- caps = config.LibvirtConfigCaps()
- caps.host = config.LibvirtConfigCapsHost()
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "x86_64"
guest.domtype = ["kvm"]
caps.guests.append(guest)
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = "i686"
guest.domtype = ["kvm"]
@@ -3025,15 +3020,15 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
- caps = config.LibvirtConfigCaps()
+ caps = vconfig.LibvirtConfigCaps()
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'x86_64'
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
- guest = config.LibvirtConfigGuest()
+ guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = 'i686'
guest.domtype = ['kvm']
@@ -3171,6 +3166,7 @@ class IptablesFirewallTestCase(test.TestCase):
pass
self.fake_libvirt_connection = FakeLibvirtDriver()
self.fw = firewall.IptablesFirewallDriver(
+ fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
in_nat_rules = [
@@ -3512,7 +3508,7 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
- self.fw = firewall.NWFilterFirewall(
+ self.fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(),
lambda: self.fake_libvirt_connection)
def test_cidr_rule_nwfilter_xml(self):
@@ -4152,7 +4148,7 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_same_host(self):
- ins_ref = self._create_instance({'host': FLAGS.host})
+ ins_ref = self._create_instance({'host': CONF.host})
def fake_os_path_exists(path):
return True
@@ -4168,7 +4164,7 @@ class LibvirtDriverTestCase(test.TestCase):
_fake_network_info(self.stubs, 1))
def test_cleanup_resize_not_same_host(self):
- host = 'not' + FLAGS.host
+ host = 'not' + CONF.host
ins_ref = self._create_instance({'host': host})
def fake_os_path_exists(path):
diff --git a/nova/tests/test_libvirt_config.py b/nova/tests/test_libvirt_config.py
index a00d5b572..c285d46c0 100644
--- a/nova/tests/test_libvirt_config.py
+++ b/nova/tests/test_libvirt_config.py
@@ -483,6 +483,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.filtername = "clean-traffic"
obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
@@ -492,6 +493,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
+ <target dev="tap12345678"/>
<filterref filter="clean-traffic">
<parameter name="IP" value="192.168.122.1"/>
</filterref>
@@ -503,6 +505,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.source_dev = "br0"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.vporttype = "openvswitch"
obj.vportparams.append({"key": "instanceid", "value": "foobar"})
@@ -512,6 +515,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source bridge="br0"/>
+ <target dev="tap12345678"/>
<virtualport type="openvswitch">
<parameters instanceid="foobar"/>
</virtualport>
@@ -522,6 +526,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.net_type = "direct"
obj.mac_addr = "DE:AD:BE:EF:CA:FE"
obj.model = "virtio"
+ obj.target_dev = "tap12345678"
obj.source_dev = "eth0"
obj.vporttype = "802.1Qbh"
@@ -531,6 +536,7 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<mac address="DE:AD:BE:EF:CA:FE"/>
<model type="virtio"/>
<source mode="private" dev="eth0"/>
+ <target dev="tap12345678"/>
<virtualport type="802.1Qbh"/>
</interface>""")
diff --git a/nova/tests/test_libvirt_utils.py b/nova/tests/test_libvirt_utils.py
new file mode 100644
index 000000000..89410390b
--- /dev/null
+++ b/nova/tests/test_libvirt_utils.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 NTT Data
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import utils
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class LibvirtUtilsTestCase(test.TestCase):
+ def test_get_disk_type(self):
+ path = "disk.config"
+ example_output = """image: disk.config
+file format: raw
+virtual size: 64M (67108864 bytes)
+cluster_size: 65536
+disk size: 96K
+blah BLAH: bb
+"""
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path).AndReturn((example_output, ''))
+ self.mox.ReplayAll()
+ disk_type = libvirt_utils.get_disk_type(path)
+ self.assertEqual(disk_type, 'raw')
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index ca52f14ed..af1c657a7 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -16,13 +16,13 @@
from lxml import etree
-from nova import flags
+from nova import config
from nova import test
from nova import utils
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
-FLAGS = flags.FLAGS
+CONF = config.CONF
class LibvirtVifTestCase(test.TestCase):
@@ -38,7 +38,8 @@ class LibvirtVifTestCase(test.TestCase):
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
- 'dns1': '8.8.8.8'
+ 'dns1': '8.8.8.8',
+ 'id': 'network-id-xxx-yyy-zzz'
}
mapping = {
@@ -66,7 +67,7 @@ class LibvirtVifTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
def _get_instance_xml(self, driver):
- conf = config.LibvirtConfigGuest()
+ conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
@@ -77,6 +78,48 @@ class LibvirtVifTestCase(test.TestCase):
conf.add_device(nic)
return conf.to_xml()
+ def test_multiple_nics(self):
+ conf = vconfig.LibvirtConfigGuest()
+ conf.virt_type = "qemu"
+ conf.name = "fake-name"
+ conf.uuid = "fake-uuid"
+ conf.memory = 100 * 1024
+ conf.vcpus = 4
+
+ # Tests multiple nic configuration and that target_dev is
+ # set for each
+ nics = [{'net_type': 'bridge',
+ 'mac_addr': '00:00:00:00:00:0b',
+ 'source_dev': 'b_source_dev',
+ 'target_dev': 'b_target_dev'},
+ {'net_type': 'ethernet',
+ 'mac_addr': '00:00:00:00:00:0e',
+ 'source_dev': 'e_source_dev',
+ 'target_dev': 'e_target_dev'},
+ {'net_type': 'direct',
+ 'mac_addr': '00:00:00:00:00:0d',
+ 'source_dev': 'd_source_dev',
+ 'target_dev': 'd_target_dev'}]
+
+ for nic in nics:
+ nic_conf = vconfig.LibvirtConfigGuestInterface()
+ nic_conf.net_type = nic['net_type']
+ nic_conf.target_dev = nic['target_dev']
+ nic_conf.mac_addr = nic['mac_addr']
+ nic_conf.source_dev = nic['source_dev']
+ conf.add_device(nic_conf)
+
+ xml = conf.to_xml()
+ doc = etree.fromstring(xml)
+ for nic in nics:
+ path = "./devices/interface/[@type='%s']" % nic['net_type']
+ node = doc.find(path)
+ self.assertEqual(nic['net_type'], node.get("type"))
+ self.assertEqual(nic['mac_addr'],
+ node.find("mac").get("address"))
+ self.assertEqual(nic['target_dev'],
+ node.find("target").get("dev"))
+
def test_bridge_driver(self):
d = vif.LibvirtBridgeDriver()
xml = self._get_instance_xml(d)
@@ -122,7 +165,7 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, FLAGS.libvirt_ovs_bridge)
+ self.assertEqual(br_name, CONF.libvirt_ovs_bridge)
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
vp = node.find("virtualport")
@@ -145,13 +188,13 @@ class LibvirtVifTestCase(test.TestCase):
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
- self.assertEqual(node.get("type"), "ethernet")
+ self.assertEqual(node.get("type"), "bridge")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith("tap"))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
- script = node.find("script").get("path")
- self.assertEquals(script, "")
+ br_name = node.find("source").get("bridge")
+ self.assertTrue(br_name.startswith("brq"))
d.unplug(None, (self.net, self.mapping))
diff --git a/nova/tests/test_loadables.py b/nova/tests/test_loadables.py
new file mode 100644
index 000000000..6d16b9fa8
--- /dev/null
+++ b/nova/tests/test_loadables.py
@@ -0,0 +1,113 @@
+# Copyright 2012 OpenStack LLC. # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Loadable class handling.
+"""
+
+from nova import exception
+from nova import test
+from nova.tests import fake_loadables
+
+
+class LoadablesTestCase(test.TestCase):
+ def setUp(self):
+ super(LoadablesTestCase, self).setUp()
+ self.fake_loader = fake_loadables.FakeLoader()
+ # The name that we imported above for testing
+ self.test_package = 'nova.tests.fake_loadables'
+
+ def test_loader_init(self):
+ self.assertEqual(self.fake_loader.package, self.test_package)
+ # Test the path of the module
+ ending_path = '/' + self.test_package.replace('.', '/')
+ self.assertTrue(self.fake_loader.path.endswith(ending_path))
+ self.assertEqual(self.fake_loader.loadable_cls_type,
+ fake_loadables.FakeLoadable)
+
+ def _compare_classes(self, classes, expected):
+ class_names = [cls.__name__ for cls in classes]
+ self.assertEqual(set(class_names), set(expected))
+
+ def test_get_all_classes(self):
+ classes = self.fake_loader.get_all_classes()
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_underscore(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2._FakeLoadableSubClass7']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type1(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass4',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_wrong_type2(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
+ prefix + '.fake_loadable2.FakeLoadableSubClass8']
+ self.assertRaises(exception.ClassNotFound,
+ self.fake_loader.get_matching_classes,
+ test_classes)
+
+ def test_get_matching_classes_with_one_function(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.FakeLoadableSubClass5']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass5']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_two_functions(self):
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_valid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ 'FakeLoadableSubClass2',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
+
+ def test_get_matching_classes_with_function_including_invalids(self):
+ # When using a method, no checking is done on valid classes.
+ prefix = self.test_package
+ test_classes = [prefix + '.fake_loadable1.return_invalid_classes',
+ prefix + '.fake_loadable2.return_valid_class']
+ classes = self.fake_loader.get_matching_classes(test_classes)
+ expected_class_names = ['FakeLoadableSubClass1',
+ '_FakeLoadableSubClass3',
+ 'FakeLoadableSubClass4',
+ 'FakeLoadableSubClass6']
+ self._compare_classes(classes, expected_class_names)
diff --git a/nova/tests/test_matchers.py b/nova/tests/test_matchers.py
new file mode 100644
index 000000000..b764b3d45
--- /dev/null
+++ b/nova/tests/test_matchers.py
@@ -0,0 +1,144 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+from testtools.tests.matchers import helpers
+
+from nova.tests import matchers
+
+
+class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictMatches(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ matches_mismatches = [
+ {},
+ {'foo': 'bar', 'baz': 'qux'},
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
+ ]
+
+ str_examples = [
+ ("DictMatches({'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Keys in d1 and not d2: set(['foo', 'baz', 'cat'])."
+ " Keys in d2 and not d1: set([])", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
+
+
+class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.DictListMatches(
+ [{'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'},
+ ])
+
+ matches_matches = [
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ matches_mismatches = [
+ [],
+ {},
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ [{'foo': 'bar', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'cat': 'yorkie'}],
+ [{'foo': 'bop', 'baz': False,
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'dog': 'yorkie'}],
+ ]
+
+ str_examples = [
+ ("DictListMatches([{'baz': 'DONTCARE', 'cat':"
+ " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
+ " {'dog': 'yorkie'}])",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
+ ("Dictionaries do not match at fluffy. d1: True d2: False",
+ [{'foo': 'bar', 'baz': 'qoox',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'dog': 'yorkie'}],
+ matches_matcher),
+ ]
+
+
+class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface):
+
+ matches_matcher = matchers.IsSubDictOf(
+ {'foo': 'bar', 'baz': 'DONTCARE',
+ 'cat': {'tabby': True, 'fluffy': False}}
+ )
+
+ matches_matches = [
+ {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux'}
+ ]
+
+ matches_mismatches = [
+ {'foo': 'bop', 'baz': 'qux',
+ 'cat': {'tabby': True, 'fluffy': False}},
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}},
+ {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
+ ]
+
+ str_examples = [
+ ("IsSubDictOf({'foo': 'bar', 'baz': 'DONTCARE',"
+ " 'cat': {'fluffy': False, 'tabby': True}})",
+ matches_matcher),
+ ]
+
+ describe_examples = [
+ ("Dictionaries do not match at fluffy. d1: False d2: True",
+ {'foo': 'bar', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
+ ("Dictionaries do not match at foo. d1: bar d2: bop",
+ {'foo': 'bop', 'baz': 'quux',
+ 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
+ ]
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index a8c78d4f5..a68ac40fd 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -28,15 +28,15 @@ import webob
from nova.api.metadata import base
from nova.api.metadata import handler
from nova import block_device
+from nova import config
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
-from nova import flags
from nova.network import api as network_api
from nova import test
from nova.tests import fake_network
-FLAGS = flags.FLAGS
+CONF = config.CONF
USER_DATA_STRING = ("This is an encoded string")
ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
@@ -142,7 +142,7 @@ class MetadataTestCase(test.TestCase):
md = fake_InstanceMetadata(self.stubs, copy(self.instance))
data = md.get_ec2_metadata(version='2009-04-04')
self.assertEqual(data['meta-data']['local-hostname'],
- "%s.%s" % (self.instance['hostname'], FLAGS.dhcp_domain))
+ "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
"""Make sure that _format_instance_mappings works"""
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 5ec91ca14..d82ae7585 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -304,203 +304,3 @@ class TestMigrations(test.TestCase):
self.assertEqual(version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
-
- def test_migration_98(self):
- """Test that migration 98 runs
-
- This test exists to prove bug 1047633 has been fixed
- """
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 97)
-
- # Set up a single volume, values don't matter
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
- vol_id = '9db3c2e5-8cac-4e94-9e6c-b5f750736727'
- volumes.insert().values(id=vol_id).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 98)
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 97)
-
- def test_migration_91(self):
- """Test that migration 91 works correctly.
-
- This test prevents regression of bugs 1052244 and 1052220.
- """
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 90)
-
- vol1_id = '10'
- vol1_uuid = '9db3c2e5-8cac-4e94-9e6c-b5f750736727'
-
- vol2_id = '11'
- vol2_uuid = 'fb17fb5a-ca3d-4bba-8903-fc776ea81d78'
-
- snap_id = '7'
- snap_uuid = 'a87e5108-8a2b-4c89-be96-0e8760db2c6a'
-
- inst_id = '0ec45d38-aefd-4c42-a209-361e848240b7'
-
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
-
- instances = sqlalchemy.Table('instances', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
- sm_flavors = sqlalchemy.Table(
- 'sm_flavors', metadata, autoload=True)
- sm_backend_config = sqlalchemy.Table(
- 'sm_backend_config', metadata, autoload=True)
- sm_volume = sqlalchemy.Table(
- 'sm_volume', metadata, autoload=True)
- volume_mappings = sqlalchemy.Table(
- 'volume_id_mappings', metadata, autoload=True)
- iscsi_targets = sqlalchemy.Table(
- 'iscsi_targets', metadata, autoload=True)
- volume_metadata = sqlalchemy.Table(
- 'volume_metadata', metadata, autoload=True)
- snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
- snapshot_mappings = sqlalchemy.Table(
- 'snapshot_id_mappings', metadata, autoload=True)
- block_device_mapping = sqlalchemy.Table(
- 'block_device_mapping', metadata, autoload=True)
-
- volumes.insert().values(id=vol1_id).execute()
- volume_mappings.insert() \
- .values(id=vol1_id, uuid=vol1_uuid).execute()
- snapshots.insert().values(id=snap_id, volume_id=vol1_id).execute()
- snapshot_mappings.insert() \
- .values(id=snap_id, uuid=snap_uuid).execute()
- volumes.insert().values(id=vol2_id, snapshot_id=snap_id).execute()
- volume_mappings.insert() \
- .values(id=vol2_id, uuid=vol2_uuid).execute()
- sm_flavors.insert().values(id=7).execute()
- sm_backend_config.insert().values(id=7, flavor_id=7).execute()
- sm_volume.insert().values(id=vol1_id, backend_id=7).execute()
- volume_metadata.insert().values(id=7, volume_id=vol1_id).execute()
- iscsi_targets.insert().values(id=7, volume_id=vol1_id).execute()
- instances.insert().values(id=7, uuid=inst_id).execute()
- block_device_mapping.insert()\
- .values(id=7, volume_id=vol1_id, instance_uuid=inst_id) \
- .execute()
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_id, vol2_id]))
- self.assertEqual(snap_id, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_id]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_id]))
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 91)
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_uuid, vol2_uuid]))
- self.assertEqual(snap_uuid, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_uuid, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_uuid]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_uuid]))
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 90)
-
- vols = volumes.select().execute().fetchall()
- self.assertEqual(set([vol.id for vol in vols]),
- set([vol1_id, vol2_id]))
- self.assertEqual(snap_id, vols[1].snapshot_id)
-
- query = volume_metadata.select(volume_metadata.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = iscsi_targets.select(iscsi_targets.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- query = block_device_mapping.select(block_device_mapping.c.id == 7)
- self.assertEqual(vol1_id, query.execute().fetchone().volume_id)
-
- snaps = sqlalchemy.select([snapshots.c.id]).execute().fetchall()
- self.assertEqual(set([snap.id for snap in snaps]),
- set([snap_id]))
-
- sm_vols = sqlalchemy.select([sm_volume.c.id]).execute().fetchall()
- self.assertEqual(set([sm_vol.id for sm_vol in sm_vols]),
- set([vol1_id]))
-
- def test_migration_111(self):
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 110)
-
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- aggregate_hosts = sqlalchemy.Table('aggregate_hosts', metadata,
- autoload=True)
- host = 'host'
- aggregate_hosts.insert().values(id=1,
- aggregate_id=1, host=host).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 111)
- agg = sqlalchemy.select([aggregate_hosts.c.host]).execute().first()
- self.assertEqual(host, agg.host)
- aggregate_hosts.insert().values(id=2,
- aggregate_id=2, host=host).execute()
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 111)
- agg = sqlalchemy.select([aggregate_hosts.c.host]).execute().first()
- self.assertEqual(host, agg.host)
-
- def test_migration_133(self):
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 132)
-
- # Set up a single volume, values don't matter
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- aggregates = sqlalchemy.Table('aggregates', metadata,
- autoload=True)
- name = 'name'
- aggregates.insert().values(id=1, availability_zone='nova',
- aggregate_name=1, name=name).execute()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 133)
- aggregates.insert().values(id=2, availability_zone='nova',
- aggregate_name=2, name=name).execute()
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 132)
- agg = sqlalchemy.select([aggregates.c.name]).execute().first()
- self.assertEqual(name, agg.name)
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index 5abe51486..21f869d5e 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -22,9 +22,9 @@ import copy
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova.network import api as network_api
from nova import notifications
from nova.openstack.common import log as logging
@@ -34,7 +34,6 @@ from nova import test
from nova.tests import fake_network
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class NotificationsTestCase(test.TestCase):
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
index ec5d452a5..ff316826a 100644
--- a/nova/tests/test_nova_manage.py
+++ b/nova/tests/test_nova_manage.py
@@ -360,13 +360,13 @@ class ProjectCommandsTestCase(test.TestCase):
output = StringIO.StringIO()
sys.stdout = output
self.commands.quota(project_id='admin',
- key='volumes',
+ key='instances',
value='unlimited',
)
sys.stdout = sys.__stdout__
result = output.getvalue()
- self.assertEquals(('volumes: unlimited' in result), True)
+ self.assertEquals(('instances: unlimited' in result), True)
def test_quota_update_invalid_key(self):
self.assertRaises(SystemExit,
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index dc615bf5d..420a086eb 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -69,29 +69,34 @@ class RootwrapTestCase(test.TestCase):
p = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
- f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
- f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
- usercmd = ['kill', '-ALRM', p.pid]
- # Incorrect signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- # Providing matching signal should be allowed
- usercmd = ['kill', '-9', p.pid]
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
-
- f = filters.KillFilter("root", "/bin/cat")
- f2 = filters.KillFilter("root", "/usr/bin/cat")
- usercmd = ['kill', os.getpid()]
- # Our own PID does not match /bin/sleep, so it should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', 999999]
- # Nonexistent PID should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should work
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+ try:
+ f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
+ f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
+ usercmd = ['kill', '-ALRM', p.pid]
+ # Incorrect signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ # Providing matching signal should be allowed
+ usercmd = ['kill', '-9', p.pid]
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+
+ f = filters.KillFilter("root", "/bin/cat")
+ f2 = filters.KillFilter("root", "/usr/bin/cat")
+ usercmd = ['kill', os.getpid()]
+ # Our own PID does not match /bin/sleep, so it should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', 999999]
+ # Nonexistent PID should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should work
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+ finally:
+ # Terminate the "cat" process and wait for it to finish
+ p.terminate()
+ p.wait()
def test_KillFilter_no_raise(self):
"""Makes sure ValueError from bug 926412 is gone"""
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index 50ce67c80..3b148b509 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -28,13 +28,12 @@ import tempfile
from boto import exception as boto_exception
from boto.s3 import connection as s3
-from nova import flags
+from nova import config
from nova.objectstore import s3server
from nova import test
from nova import wsgi
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
# Create a unique temporary directory. We don't delete after test to
# allow checking the contents after running tests. Users and/or tools
@@ -55,14 +54,14 @@ class S3APITestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
s3_host='127.0.0.1')
- shutil.rmtree(FLAGS.buckets_path)
- os.mkdir(FLAGS.buckets_path)
+ shutil.rmtree(CONF.buckets_path)
+ os.mkdir(CONF.buckets_path)
- router = s3server.S3Application(FLAGS.buckets_path)
+ router = s3server.S3Application(CONF.buckets_path)
self.server = wsgi.Server("S3 Objectstore",
router,
- host=FLAGS.s3_host,
- port=FLAGS.s3_port)
+ host=CONF.s3_host,
+ port=CONF.s3_port)
self.server.start()
if not boto.config.has_section('Boto'):
@@ -71,8 +70,8 @@ class S3APITestCase(test.TestCase):
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id='fake',
aws_secret_access_key='fake',
- host=FLAGS.s3_host,
- port=FLAGS.s3_port,
+ host=CONF.s3_host,
+ port=CONF.s3_port,
is_secure=False,
calling_format=s3.OrdinaryCallingFormat())
self.conn = conn
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index 96e6b08a9..62c97db2f 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -14,14 +14,13 @@
# under the License.
from nova.cloudpipe import pipelib
+from nova import config
from nova import context
from nova import crypto
-from nova import flags
from nova import test
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class PipelibTest(test.TestCase):
@@ -50,7 +49,7 @@ class PipelibTest(test.TestCase):
self.cloudpipe.launch_vpn_instance(self.context)
def test_setup_security_group(self):
- group_name = "%s%s" % (self.project, FLAGS.vpn_key_suffix)
+ group_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
# First attemp, does not exist (thus its created)
res1_group = self.cloudpipe.setup_security_group(self.context)
@@ -61,7 +60,7 @@ class PipelibTest(test.TestCase):
self.assertEqual(res1_group, res2_group)
def test_setup_key_pair(self):
- key_name = "%s%s" % (self.project, FLAGS.vpn_key_suffix)
+ key_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
with utils.tempdir() as tmpdir:
self.flags(keys_path=tmpdir)
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index c0c487447..531dd97f3 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -21,18 +21,15 @@ import os.path
import StringIO
import urllib2
+from nova import config
from nova import context
from nova import exception
-from nova import flags
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova import utils
-FLAGS = flags.FLAGS
-
-
class PolicyFileTestCase(test.TestCase):
def setUp(self):
super(PolicyFileTestCase, self).setUp()
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 8f6f27bb0..e640d7f01 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -18,20 +18,20 @@
Test suite for PowerVMDriver.
"""
-from nova.compute import power_state
+from nova import config
from nova import context
from nova import db
-from nova import flags
from nova import test
+from nova.compute import power_state
from nova.openstack.common import log as logging
from nova.virt import images
+
from nova.virt.powervm import driver as powervm_driver
+from nova.virt.powervm import exception
from nova.virt.powervm import lpar
from nova.virt.powervm import operator
-
-FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
@@ -153,6 +153,28 @@ class PowerVMDriverTestCase(test.TestCase):
state = self.powervm_connection.get_info(self.instance)['state']
self.assertEqual(state, power_state.RUNNING)
+ def test_spawn_cleanup_on_fail(self):
+ """Verify on a failed spawn, we get the original exception raised"""
+ # helper function
+ def raise_(ex):
+ raise ex
+
+ self.flags(powervm_img_local_path='/images/')
+ self.stubs.Set(images, 'fetch_to_raw', lambda *x, **y: None)
+ self.stubs.Set(
+ self.powervm_connection._powervm._operator,
+ 'copy_image_file',
+ lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
+ self.stubs.Set(
+ self.powervm_connection._powervm, '_cleanup',
+ lambda *x, **y: raise_(Exception('This should be logged.')))
+
+ self.assertRaises(exception.PowerVMImageCreationFailed,
+ self.powervm_connection.spawn,
+ context.get_admin_context(),
+ self.instance,
+ {'id': 'ANY_ID'}, 's3cr3t', [])
+
def test_destroy(self):
self.powervm_connection.destroy(self.instance, None)
self.stubs.Set(FakeIVMOperator, 'get_lpar', lambda x, y: None)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 5cc5dedde..bfcc03b0f 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -20,22 +20,20 @@ import datetime
from nova import compute
from nova.compute import instance_types
+from nova import config
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
-from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
-from nova import volume
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class QuotaIntegrationTestCase(test.TestCase):
@@ -45,8 +43,6 @@ class QuotaIntegrationTestCase(test.TestCase):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
- quota_volumes=2,
- quota_gigabytes=20,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
@@ -62,7 +58,7 @@ class QuotaIntegrationTestCase(test.TestCase):
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry"""
- if (topic == FLAGS.scheduler_topic and
+ if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
@@ -91,17 +87,9 @@ class QuotaIntegrationTestCase(test.TestCase):
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
- def _create_volume(self, size=10):
- """Create a test volume"""
- vol = {}
- vol['user_id'] = self.user_id
- vol['project_id'] = self.project_id
- vol['size'] = size
- return db.volume_create(self.context, vol)['id']
-
def test_too_many_instances(self):
instance_uuids = []
- for i in range(FLAGS.quota_instances):
+ for i in range(CONF.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = instance_types.get_instance_type_by_name('m1.small')
@@ -153,7 +141,7 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_too_many_metadata_items(self):
metadata = {}
- for i in range(FLAGS.quota_metadata_items + 1):
+ for i in range(CONF.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
@@ -183,38 +171,38 @@ class QuotaIntegrationTestCase(test.TestCase):
def test_max_injected_files(self):
files = []
- for i in xrange(FLAGS.quota_injected_files):
+ for i in xrange(CONF.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
- for i in xrange(FLAGS.quota_injected_files + 1):
+ for i in xrange(CONF.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
- max = FLAGS.quota_injected_file_content_bytes
+ max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
- max = FLAGS.quota_injected_file_content_bytes
+ max = CONF.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
- max = FLAGS.quota_injected_file_path_bytes
+ max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
- max = FLAGS.quota_injected_file_path_bytes
+ max = CONF.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
@@ -742,8 +730,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=10,
cores=20,
ram=50 * 1024,
- volumes=10,
- gigabytes=1000,
floating_ips=10,
metadata_items=128,
injected_files=5,
@@ -762,7 +748,6 @@ class DbQuotaDriverTestCase(test.TestCase):
return dict(
instances=5,
ram=25 * 1024,
- gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
@@ -778,8 +763,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=5,
cores=20,
ram=25 * 1024,
- volumes=10,
- gigabytes=500,
floating_ips=10,
metadata_items=64,
injected_files=5,
@@ -799,7 +782,6 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
- gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
@@ -810,7 +792,6 @@ class DbQuotaDriverTestCase(test.TestCase):
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
- gigabytes=50,
injected_files=2,
injected_file_path_bytes=127,
)
@@ -822,8 +803,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
- volumes=dict(in_use=2, reserved=0),
- gigabytes=dict(in_use=10, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
@@ -863,17 +842,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
+ floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
@@ -941,17 +910,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- floating_ips=dict(
+ floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
@@ -1020,16 +979,6 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=10 * 1024,
reserved=0,
),
- volumes=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
floating_ips=dict(
limit=10,
in_use=2,
@@ -1089,12 +1038,7 @@ class DbQuotaDriverTestCase(test.TestCase):
in_use=4,
reserved=4,
),
- gigabytes=dict(
- limit=50,
- in_use=10,
- reserved=0,
- ),
- injected_files=dict(
+ injected_files=dict(
limit=2,
in_use=0,
reserved=0,
@@ -1126,12 +1070,6 @@ class DbQuotaDriverTestCase(test.TestCase):
ram=dict(
limit=25 * 1024,
),
- volumes=dict(
- limit=10,
- ),
- gigabytes=dict(
- limit=50,
- ),
floating_ips=dict(
limit=10,
),
@@ -1207,7 +1145,6 @@ class DbQuotaDriverTestCase(test.TestCase):
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
- 'volumes', 'gigabytes',
'floating_ips', 'security_groups'],
True)
@@ -1216,8 +1153,6 @@ class DbQuotaDriverTestCase(test.TestCase):
instances=10,
cores=20,
ram=50 * 1024,
- volumes=10,
- gigabytes=1000,
floating_ips=10,
security_groups=10,
))
@@ -1482,7 +1417,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
- self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
+ self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
timeutils.set_time_override()
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 3f31e8e01..a1a28d05a 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -23,11 +23,10 @@ Unit Tests for remote procedure calls using queue
import mox
import sys
-
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova import service
@@ -47,7 +46,8 @@ test_service_opts = [
help="Port number to bind test service to"),
]
-flags.FLAGS.register_opts(test_service_opts)
+CONF = config.CONF
+CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 8b883f755..ed38de8c7 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -27,15 +27,13 @@ from eventlet import greenpool
import mox
import nova
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
-from nova.openstack.common import uuidutils
from nova import test
from nova import utils
-
-FLAGS = flags.FLAGS
+CONF = config.CONF
class ByteConversionTest(test.TestCase):
@@ -382,7 +380,7 @@ class GenericUtilsTestCase(test.TestCase):
def test_generate_glance_url(self):
generated_url = utils.generate_glance_url()
- actual_url = "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port)
+ actual_url = "http://%s:%d" % (CONF.glance_host, CONF.glance_port)
self.assertEqual(generated_url, actual_url)
def test_read_cached_file(self):
@@ -509,31 +507,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEquals(h1, h2)
-class IsUUIDLikeTestCase(test.TestCase):
- def assertUUIDLike(self, val, expected):
- result = uuidutils.is_uuid_like(val)
- self.assertEqual(result, expected)
-
- def test_good_uuid(self):
- val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- self.assertUUIDLike(val, True)
-
- def test_integer_passed(self):
- val = 1
- self.assertUUIDLike(val, False)
-
- def test_non_uuid_string_passed(self):
- val = 'foo-fooo'
- self.assertUUIDLike(val, False)
-
- def test_non_uuid_string_passed2(self):
- val = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
- self.assertUUIDLike(val, False)
-
- def test_gen_valid_uuid(self):
- self.assertUUIDLike(str(utils.gen_uuid()), True)
-
-
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
@@ -780,8 +753,23 @@ class MkfsTestCase(test.TestCase):
def test_mkfs(self):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev')
+ utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev')
utils.execute('mkswap', '/my/swap/block/dev')
self.mox.ReplayAll()
utils.mkfs('ext4', '/my/block/dev')
+ utils.mkfs('msdos', '/my/msdos/block/dev')
utils.mkfs('swap', '/my/swap/block/dev')
+
+ def test_mkfs_with_label(self):
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F',
+ '-L', 'ext4-vol', '/my/block/dev')
+ utils.execute('mkfs', '-t', 'msdos',
+ '-n', 'msdos-vol', '/my/msdos/block/dev')
+ utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev')
+ self.mox.ReplayAll()
+
+ utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
+ utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
+ utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 16775054f..67c52ed3d 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -17,8 +17,8 @@
import os
+from nova import config
from nova import exception
-from nova import flags
from nova import test
from nova import tests
from nova import utils
@@ -27,8 +27,6 @@ from nova.virt import driver
from nova.openstack.common import jsonutils
-FLAGS = flags.FLAGS
-
class TestVirtDriver(test.TestCase):
def test_block_device(self):
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 757ec2bf2..d6bce87f3 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -20,10 +20,10 @@ Test suite for VMWareAPI.
"""
from nova.compute import power_state
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova import test
import nova.tests.image.fake
from nova.tests.vmwareapi import db_fakes
@@ -32,9 +32,6 @@ from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
-FLAGS = flags.FLAGS
-
-
class VMWareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 54f7948b6..53ba9eea7 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -29,10 +29,10 @@ from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -41,6 +41,7 @@ from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
+from nova.tests import matchers
from nova.tests.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
@@ -55,7 +56,7 @@ from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
@@ -173,7 +174,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
- vol['availability_zone'] = FLAGS.node_availability_zone
+ vol['availability_zone'] = CONF.node_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@@ -235,7 +236,6 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(self._make_connection_info(),
@@ -253,7 +253,6 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
@@ -267,7 +266,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
- self.network = importutils.import_object(FLAGS.network_manager)
+ self.network = importutils.import_object(CONF.network_manager)
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
@@ -277,7 +276,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ xenapi_fake.create_network('fake', CONF.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
@@ -312,7 +311,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
fake_image.FakeImageService_reset()
def test_init_host(self):
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
@@ -361,7 +361,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
- self.assertDictMatch(fake_diagnostics, expected)
+ self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
@@ -513,17 +513,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- session = xenapi_conn.XenAPISession(url, username, password)
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password,
+ fake.FakeVirtAPI())
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- session = xenapi_conn.XenAPISession(url, username, password)
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password,
+ fake.FakeVirtAPI())
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
@@ -774,7 +776,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid='00000000-0000-0000-0000-000000000002',
- host=FLAGS.host,
+ host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id)
@@ -816,7 +818,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_rescue(self):
instance = self._create_instance()
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
vm_ref = vm_utils.lookup(session, instance.name)
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
@@ -882,7 +885,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
conn.reboot(instance, None, "SOFT")
def test_reboot_halted(self):
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance.name, 'Halted')
@@ -898,7 +902,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
- def test_maintenance_mode(self):
+ def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
@@ -912,9 +916,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
- # Always find the 'bar' destination host
+ def fake_aggregate_get(context, host, key):
+ if find_aggregate:
+ return [{'fake': 'aggregate'}]
+ else:
+ return []
+ self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
+ fake_aggregate_get)
+
def fake_host_find(context, session, src, dst):
- return 'bar'
+ if find_host:
+ return 'bar'
+ else:
+ raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
@@ -929,6 +943,34 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
self.assertTrue(instance['task_state'], task_states.MIGRATING)
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
+ def test_session_virtapi(self):
+ was = {'called': False}
+
+ def fake_aggregate_get_by_host(self, *args, **kwargs):
+ was['called'] = True
+ raise Exception()
+ self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host",
+ fake_aggregate_get_by_host)
+
+ self.stubs.Set(self.conn._session, "is_slave", True)
+
+ try:
+ self.conn._session._get_host_uuid()
+ except Exception:
+ pass
+ self.assertTrue(was['called'])
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
@@ -1005,7 +1047,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ xenapi_fake.create_network('fake', CONF.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -1415,7 +1457,8 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
fake_resize_part_and_fs)
ctx = context.RequestContext(self.user_id, self.project_id)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = db.instance_create(self.context, self.instance_values)
@@ -1501,7 +1544,8 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
def assertCalled(self, instance):
ctx = context.RequestContext(self.user_id, self.project_id)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
@@ -1704,7 +1748,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
- self.network = importutils.import_object(FLAGS.network_manager)
+ self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
@@ -1958,7 +2002,8 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Ensure StorageRepositoryNotFound is raise when wrong filter."""
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
@@ -1966,7 +2011,8 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Ensure the default local-storage is found."""
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(
name_label='Fake Storage',
@@ -1982,7 +2028,8 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Ensure the SR is found when using a different filter."""
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
@@ -1995,7 +2042,8 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Ensure the default SR is found regardless of other-config."""
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
+ fake.FakeVirtAPI())
pool_ref = xenapi_fake.create_pool('')
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
@@ -2027,12 +2075,13 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
- compute_driver='xenapi.XenAPIDriver')
+ compute_driver='xenapi.XenAPIDriver',
+ node_availability_zone='avail_zone1')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.compute = importutils.import_object(FLAGS.compute_manager)
+ self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'availability_zone': 'test_zone',
@@ -2092,7 +2141,8 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_init_pool.called)
- self.assertDictMatch(self.fake_metadata, result.metadetails)
+ self.assertThat(self.fake_metadata,
+ matchers.DictMatches(result.metadetails))
def test_join_slave(self):
"""Ensure join_slave gets called when the request gets to master."""
@@ -2170,8 +2220,9 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate.id)
self.assertTrue(fake_clear_pool.called)
- self.assertDictMatch({pool_states.POOL_FLAG: 'XenAPI',
- pool_states.KEY: pool_states.ACTIVE}, result.metadetails)
+ self.assertThat({pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: pool_states.ACTIVE},
+ matchers.DictMatches(result.metadetails))
def test_remote_master_non_empty_pool(self):
"""Ensure AggregateError is raised if removing the master."""
@@ -2550,7 +2601,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
self.metadetails = {"host": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(FLAGS.host, host)
+ self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
@@ -2567,7 +2618,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
self.metadetails = {"dest_other": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(FLAGS.host, host)
+ self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 328cd8d67..d780edc6f 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -16,12 +16,12 @@
import platform
+import nova.config
import nova.context
import nova.db
-import nova.flags
from nova.image import glance
-FLAGS = nova.flags.FLAGS
+CONF = nova.config.CONF
def get_test_admin_context():
@@ -74,7 +74,7 @@ def get_test_instance(context=None):
def get_test_network_info(count=1):
- ipv6 = FLAGS.use_ipv6
+ ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index a469c4706..dd19f4929 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -20,6 +20,7 @@ Stubouts, mocks and fixtures for the test suite
"""
import time
+import uuid
from nova.compute import task_states
from nova.compute import vm_states
@@ -62,7 +63,7 @@ def stub_out_db_instance_api(stubs):
base_options = {
'name': values['name'],
'id': values['id'],
- 'uuid': utils.gen_uuid(),
+ 'uuid': uuid.uuid4(),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py
index c78d42fd3..3a8c9a640 100644
--- a/nova/tests/xenapi/test_vm_utils.py
+++ b/nova/tests/xenapi/test_vm_utils.py
@@ -1,4 +1,6 @@
import mox
+from nova import context
+from nova import db
from nova import exception
from nova.tests.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
@@ -137,3 +139,50 @@ class VMRefOrRaiseVMNotFoundTestCase(unittest.TestCase):
self.assertTrue(
'somename' in str(e))
mock.VerifyAll()
+
+
+class BittorrentTestCase(stubs.XenAPITestBase):
+ def setUp(self):
+ super(BittorrentTestCase, self).setUp()
+ self.context = context.get_admin_context()
+
+ def test_image_uses_bittorrent(self):
+ sys_meta = {'image_bittorrent': True}
+ instance = db.instance_create(self.context,
+ {'system_metadata': sys_meta})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.flags(xenapi_torrent_images='some')
+ self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
+ instance))
+
+ def _test_create_image(self, cache_type):
+ sys_meta = {'image_cache_in_nova': True}
+ instance = db.instance_create(self.context,
+ {'system_metadata': sys_meta})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.flags(cache_images=cache_type)
+
+ was = {'called': None}
+
+ def fake_create_cached_image(*args):
+ was['called'] = 'some'
+ return {}
+ self.stubs.Set(vm_utils, '_create_cached_image',
+ fake_create_cached_image)
+
+ def fake_fetch_image(*args):
+ was['called'] = 'none'
+ return {}
+ self.stubs.Set(vm_utils, '_fetch_image',
+ fake_fetch_image)
+
+ vm_utils._create_image(self.context, None, instance,
+ 'foo', 'bar', 'baz')
+
+ self.assertEqual(was['called'], cache_type)
+
+ def test_create_image_cached(self):
+ self._test_create_image('some')
+
+ def test_create_image_uncached(self):
+ self._test_create_image('none')
diff --git a/nova/utils.py b/nova/utils.py
index 398c620f8..38fdc6f0f 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -37,7 +37,6 @@ import struct
import sys
import tempfile
import time
-import uuid
import weakref
from xml.sax import saxutils
@@ -49,7 +48,6 @@ import netaddr
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
@@ -180,12 +178,20 @@ def execute(*cmd, **kwargs):
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
+
+ if os.name == 'nt':
+ preexec_fn = None
+ close_fds = False
+ else:
+ preexec_fn = _subprocess_setup
+ close_fds = True
+
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
- close_fds=True,
- preexec_fn=_subprocess_setup,
+ close_fds=close_fds,
+ preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
@@ -768,10 +774,6 @@ def parse_server_string(server_str):
return ('', '')
-def gen_uuid():
- return uuid.uuid4()
-
-
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
@@ -1160,17 +1162,17 @@ def mkfs(fs, path, label=None):
:param label: Volume label to use
"""
if fs == 'swap':
- execute('mkswap', path)
+ args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
- #add -F to force no interactive excute on non-block device.
- if fs in ('ext3', 'ext4'):
- args.extend(['-F'])
- if label:
- if fs in ('msdos', 'vfat'):
- label_opt = '-n'
- else:
- label_opt = '-L'
- args.extend([label_opt, label])
- args.append(path)
- execute(*args)
+    # Add -F to force non-interactive execution (no prompt) on a non-block device.
+ if fs in ('ext3', 'ext4'):
+ args.extend(['-F'])
+ if label:
+ if fs in ('msdos', 'vfat'):
+ label_opt = '-n'
+ else:
+ label_opt = '-L'
+ args.extend([label_opt, label])
+ args.append(path)
+ execute(*args)
diff --git a/nova/virt/baremetal/__init__.py b/nova/virt/baremetal/db/__init__.py
index 520feecbd..ad883f505 100644
--- a/nova/virt/baremetal/__init__.py
+++ b/nova/virt/baremetal/db/__init__.py
@@ -1,6 +1,5 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,5 +13,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-# NOTE(sdague) for more convenient compute_driver names
-from nova.virt.baremetal.driver import BareMetalDriver
+from nova.virt.baremetal.db.api import *
diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py
new file mode 100644
index 000000000..a9b6b3fe2
--- /dev/null
+++ b/nova/virt/baremetal/db/api.py
@@ -0,0 +1,175 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Defines interface for DB access.
+
+The underlying driver is loaded as a :class:`LazyPluggable`.
+
+Functions in this module are imported into the nova.virt.baremetal.db
+namespace. Call these functions from nova.virt.baremetal.db namespace, not
+the nova.virt.baremetal.db.api namespace.
+
+All functions in this module return objects that implement a dictionary-like
+interface. Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface. However, a future goal is to have all of
+these objects be simple dictionaries.
+
+
+**Related Flags**
+
+:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
+ `sqlalchemy` is the only supported backend right now.
+
+:baremetal_sql_connection: string specifying the sqlalchemy connection to use,
+ like: `sqlite:///var/lib/nova/nova.sqlite`.
+
+"""
+
+from nova import config
+from nova.openstack.common import cfg
+from nova import utils
+
+
+db_opts = [
+ cfg.StrOpt('baremetal_db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ ]
+
+CONF = config.CONF
+CONF.register_opts(db_opts)
+
+IMPL = utils.LazyPluggable(
+ 'baremetal_db_backend',
+ sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
+
+
+def bm_node_get_all(context, service_host=None):
+ return IMPL.bm_node_get_all(context,
+ service_host=service_host)
+
+
+def bm_node_find_free(context, service_host=None,
+ memory_mb=None, cpus=None, local_gb=None):
+ return IMPL.bm_node_find_free(context,
+ service_host=service_host,
+ memory_mb=memory_mb,
+ cpus=cpus,
+ local_gb=local_gb)
+
+
+def bm_node_get(context, bm_node_id):
+ return IMPL.bm_node_get(context, bm_node_id)
+
+
+def bm_node_get_by_instance_uuid(context, instance_uuid):
+ return IMPL.bm_node_get_by_instance_uuid(context,
+ instance_uuid)
+
+
+def bm_node_create(context, values):
+ return IMPL.bm_node_create(context, values)
+
+
+def bm_node_destroy(context, bm_node_id):
+ return IMPL.bm_node_destroy(context, bm_node_id)
+
+
+def bm_node_update(context, bm_node_id, values):
+ return IMPL.bm_node_update(context, bm_node_id, values)
+
+
+def bm_pxe_ip_create(context, address, server_address):
+ return IMPL.bm_pxe_ip_create(context, address, server_address)
+
+
+def bm_pxe_ip_create_direct(context, bm_pxe_ip):
+ return IMPL.bm_pxe_ip_create_direct(context, bm_pxe_ip)
+
+
+def bm_pxe_ip_destroy(context, ip_id):
+ return IMPL.bm_pxe_ip_destroy(context, ip_id)
+
+
+def bm_pxe_ip_destroy_by_address(context, address):
+ return IMPL.bm_pxe_ip_destroy_by_address(context, address)
+
+
+def bm_pxe_ip_get_all(context):
+ return IMPL.bm_pxe_ip_get_all(context)
+
+
+def bm_pxe_ip_get(context, ip_id):
+ return IMPL.bm_pxe_ip_get(context, ip_id)
+
+
+def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
+ return IMPL.bm_pxe_ip_get_by_bm_node_id(context, bm_node_id)
+
+
+def bm_pxe_ip_associate(context, bm_node_id):
+ return IMPL.bm_pxe_ip_associate(context, bm_node_id)
+
+
+def bm_pxe_ip_disassociate(context, bm_node_id):
+ return IMPL.bm_pxe_ip_disassociate(context, bm_node_id)
+
+
+def bm_interface_get(context, if_id):
+ return IMPL.bm_interface_get(context, if_id)
+
+
+def bm_interface_get_all(context):
+ return IMPL.bm_interface_get_all(context)
+
+
+def bm_interface_destroy(context, if_id):
+ return IMPL.bm_interface_destroy(context, if_id)
+
+
+def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
+ return IMPL.bm_interface_create(context, bm_node_id, address,
+ datapath_id, port_no)
+
+
+def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
+ return IMPL.bm_interface_set_vif_uuid(context, if_id, vif_uuid)
+
+
+def bm_interface_get_by_vif_uuid(context, vif_uuid):
+ return IMPL.bm_interface_get_by_vif_uuid(context, vif_uuid)
+
+
+def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
+ return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id)
+
+
+def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
+ swap_mb):
+ return IMPL.bm_deployment_create(context, key, image_path,
+ pxe_config_path, root_mb, swap_mb)
+
+
+def bm_deployment_get(context, dep_id):
+ return IMPL.bm_deployment_get(context, dep_id)
+
+
+def bm_deployment_destroy(context, dep_id):
+ return IMPL.bm_deployment_destroy(context, dep_id)
diff --git a/nova/virt/baremetal/db/migration.py b/nova/virt/baremetal/db/migration.py
new file mode 100644
index 000000000..40631bf45
--- /dev/null
+++ b/nova/virt/baremetal/db/migration.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Database setup and migration commands."""
+
+from nova import utils
+
+
+IMPL = utils.LazyPluggable(
+ 'baremetal_db_backend',
+ sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration')
+
+INIT_VERSION = 0
+
+
+def db_sync(version=None):
+ """Migrate the database to `version` or the most recent version."""
+ return IMPL.db_sync(version=version)
+
+
+def db_version():
+ """Display the current database version."""
+ return IMPL.db_version()
diff --git a/nova/virt/baremetal/db/sqlalchemy/__init__.py b/nova/virt/baremetal/db/sqlalchemy/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
new file mode 100644
index 000000000..1127d77e8
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -0,0 +1,351 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of SQLAlchemy backend."""
+
+from sqlalchemy import and_
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql import func
+
+from nova.db.sqlalchemy.api import is_user_context
+from nova.db.sqlalchemy.api import require_admin_context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova.virt.baremetal.db.sqlalchemy import models
+from nova.virt.baremetal.db.sqlalchemy.session import get_session
+
+LOG = logging.getLogger(__name__)
+
+
+def model_query(context, *args, **kwargs):
+ """Query helper that accounts for context's `read_deleted` field.
+
+ :param context: context to query under
+ :param session: if present, the session to use
+ :param read_deleted: if present, overrides context's read_deleted field.
+ :param project_only: if present and context is user-type, then restrict
+ query to match the context's project_id.
+ """
+ session = kwargs.get('session') or get_session()
+ read_deleted = kwargs.get('read_deleted') or context.read_deleted
+ project_only = kwargs.get('project_only')
+
+ query = session.query(*args)
+
+ if read_deleted == 'no':
+ query = query.filter_by(deleted=False)
+ elif read_deleted == 'yes':
+ pass # omit the filter to include deleted and active
+ elif read_deleted == 'only':
+ query = query.filter_by(deleted=True)
+ else:
+ raise Exception(
+ _("Unrecognized read_deleted value '%s'") % read_deleted)
+
+ if project_only and is_user_context(context):
+ query = query.filter_by(project_id=context.project_id)
+
+ return query
+
+
+def _save(ref, session=None):
+ if not session:
+ session = get_session()
+ # We must not call ref.save() with session=None, otherwise NovaBase
+ # uses nova-db's session, which cannot access bm-db.
+ ref.save(session=session)
+
+
+def _build_node_order_by(query):
+ query = query.order_by(asc(models.BareMetalNode.memory_mb))
+ query = query.order_by(asc(models.BareMetalNode.cpus))
+ query = query.order_by(asc(models.BareMetalNode.local_gb))
+ return query
+
+
+@require_admin_context
+def bm_node_get_all(context, service_host=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no")
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ return query.all()
+
+
+@require_admin_context
+def bm_node_find_free(context, service_host=None,
+ cpus=None, memory_mb=None, local_gb=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no")
+ query = query.filter(models.BareMetalNode.instance_uuid == None)
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ if cpus is not None:
+ query = query.filter(models.BareMetalNode.cpus >= cpus)
+ if memory_mb is not None:
+ query = query.filter(models.BareMetalNode.memory_mb >= memory_mb)
+ if local_gb is not None:
+ query = query.filter(models.BareMetalNode.local_gb >= local_gb)
+ query = _build_node_order_by(query)
+ return query.first()
+
+
+@require_admin_context
+def bm_node_get(context, bm_node_id):
+ result = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(id=bm_node_id).\
+ first()
+ return result
+
+
+@require_admin_context
+def bm_node_get_by_instance_uuid(context, instance_uuid):
+ result = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(instance_uuid=instance_uuid).\
+ first()
+ return result
+
+
+@require_admin_context
+def bm_node_create(context, values):
+ bm_node_ref = models.BareMetalNode()
+ bm_node_ref.update(values)
+ _save(bm_node_ref)
+ return bm_node_ref
+
+
+@require_admin_context
+def bm_node_update(context, bm_node_id, values, ):
+ model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(id=bm_node_id).\
+ update(values)
+
+
+@require_admin_context
+def bm_node_destroy(context, bm_node_id):
+ model_query(context, models.BareMetalNode).\
+ filter_by(id=bm_node_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def bm_pxe_ip_get_all(context, session=None):
+ query = model_query(context, models.BareMetalPxeIp, read_deleted="no")
+ return query.all()
+
+
+@require_admin_context
+def bm_pxe_ip_create(context, address, server_address):
+ ref = models.BareMetalPxeIp()
+ ref.address = address
+ ref.server_address = server_address
+ _save(ref)
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_create_direct(context, bm_pxe_ip):
+ ref = bm_pxe_ip_create(context,
+ address=bm_pxe_ip['address'],
+ server_address=bm_pxe_ip['server_address'])
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_destroy(context, ip_id):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(id=ip_id).\
+ delete()
+
+
+@require_admin_context
+def bm_pxe_ip_destroy_by_address(context, address):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(address=address).\
+ delete()
+
+
+@require_admin_context
+def bm_pxe_ip_get(context, ip_id):
+ ref = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(id=ip_id).\
+ first()
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
+ ref = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ first()
+ return ref
+
+
+@require_admin_context
+def bm_pxe_ip_associate(context, bm_node_id):
+ session = get_session()
+ with session.begin():
+ # Check if the node really exists
+ node_ref = model_query(context, models.BareMetalNode,
+ read_deleted="no", session=session).\
+ filter_by(id=bm_node_id).\
+ first()
+ if not node_ref:
+ raise exception.NovaException("bm_node %s not found" % bm_node_id)
+ # Check if the node already has a pxe_ip
+ ip_ref = model_query(context, models.BareMetalPxeIp,
+ read_deleted="no", session=session).\
+ filter_by(bm_node_id=bm_node_id).\
+ first()
+ if ip_ref:
+ return ip_ref.id
+ # with_lockmode('update') and filter_by(bm_node_id=None) will lock all
+ # records. It may cause a performance problem in high-concurrency
+ # environment.
+ ip_ref = model_query(context, models.BareMetalPxeIp,
+ read_deleted="no", session=session).\
+ filter_by(bm_node_id=None).\
+ with_lockmode('update').\
+ first()
+ if not ip_ref:
+ raise exception.NovaException("free bm_pxe_ip not found")
+ ip_ref.bm_node_id = bm_node_id
+ session.add(ip_ref)
+ return ip_ref.id
+
+
+@require_admin_context
+def bm_pxe_ip_disassociate(context, bm_node_id):
+ model_query(context, models.BareMetalPxeIp, read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ update({'bm_node_id': None})
+
+
+@require_admin_context
+def bm_interface_get(context, if_id):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(id=if_id).\
+ first()
+ return result
+
+
+def bm_interface_get_all(context):
+ query = model_query(context, models.BareMetalInterface,
+ read_deleted="no")
+ return query.all()
+
+
+@require_admin_context
+def bm_interface_destroy(context, if_id):
+ # Delete physically since it has unique columns
+ model_query(context, models.BareMetalInterface, read_deleted="no").\
+ filter_by(id=if_id).\
+ delete()
+
+
+@require_admin_context
+def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
+ ref = models.BareMetalInterface()
+ ref.bm_node_id = bm_node_id
+ ref.address = address
+ ref.datapath_id = datapath_id
+ ref.port_no = port_no
+ _save(ref)
+ return ref.id
+
+
+@require_admin_context
+def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
+ session = get_session()
+ with session.begin():
+ ref = model_query(context, models.BareMetalInterface,
+ read_deleted="no", session=session).\
+ filter_by(id=if_id).\
+ with_lockmode('update').\
+ first()
+ if not ref:
+ raise exception.NovaException('interface id=%s is not found' %
+ if_id)
+ ref.vif_uuid = vif_uuid
+ try:
+ session.add(ref)
+ session.flush()
+ except IntegrityError:
+ raise exception.NovaException('vif_uuid %s is already assigned' %
+ vif_uuid)
+
+
+@require_admin_context
+def bm_interface_get_by_vif_uuid(context, vif_uuid):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(vif_uuid=vif_uuid).\
+ first()
+ return result
+
+
+@require_admin_context
+def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
+ result = model_query(context, models.BareMetalInterface,
+ read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ all()
+ return result
+
+
+@require_admin_context
+def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
+ swap_mb):
+ ref = models.BareMetalDeployment()
+ ref.key = key
+ ref.image_path = image_path
+ ref.pxe_config_path = pxe_config_path
+ ref.root_mb = root_mb
+ ref.swap_mb = swap_mb
+ _save(ref)
+ return ref.id
+
+
+@require_admin_context
+def bm_deployment_get(context, dep_id):
+ result = model_query(context, models.BareMetalDeployment,
+ read_deleted="no").\
+ filter_by(id=dep_id).\
+ first()
+ return result
+
+
+@require_admin_context
+def bm_deployment_destroy(context, dep_id):
+ model_query(context, models.BareMetalDeployment).\
+ filter_by(id=dep_id).\
+ update({'deleted': True,
+ 'deleted_at': timeutils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg
new file mode 100644
index 000000000..368e93a52
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg
@@ -0,0 +1,20 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=nova_bm
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=[]
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
new file mode 100644
index 000000000..d945755fc
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
@@ -0,0 +1,124 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import ForeignKeyConstraint
+from sqlalchemy import Boolean, BigInteger, Column, DateTime, Float, ForeignKey
+from sqlalchemy import Index, Integer, MetaData, String, Table, Text
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ bm_nodes = Table('bm_nodes', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('cpus', Integer),
+ Column('memory_mb', Integer),
+ Column('local_gb', Integer),
+ Column('pm_address', String(length=255)),
+ Column('pm_user', String(length=255)),
+ Column('pm_password', String(length=255)),
+ Column('service_host', String(length=255)),
+ Column('prov_mac_address', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('registration_status', String(length=16)),
+ Column('task_state', String(length=255)),
+ Column('prov_vlan_id', Integer),
+ Column('terminal_port', Integer),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_interfaces = Table('bm_interfaces', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('bm_node_id', Integer),
+ Column('address', String(length=255), unique=True),
+ Column('datapath_id', String(length=255)),
+ Column('port_no', Integer),
+ Column('vif_uuid', String(length=36), unique=True),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_pxe_ips = Table('bm_pxe_ips', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('address', String(length=255), unique=True),
+ Column('bm_node_id', Integer),
+ Column('server_address', String(length=255), unique=True),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_deployments = Table('bm_deployments', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('bm_node_id', Integer),
+ Column('key', String(length=255)),
+ Column('image_path', String(length=255)),
+ Column('pxe_config_path', String(length=255)),
+ Column('root_mb', Integer),
+ Column('swap_mb', Integer),
+ mysql_engine='InnoDB',
+ #mysql_charset='utf8'
+ )
+
+ bm_nodes.create()
+ bm_interfaces.create()
+ bm_pxe_ips.create()
+ bm_deployments.create()
+
+ Index('idx_bm_nodes_service_host_deleted',
+ bm_nodes.c.service_host, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+ Index('idx_bm_nodes_instance_uuid_deleted',
+ bm_nodes.c.instance_uuid, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+ Index('idx_bm_nodes_hmcld',
+ bm_nodes.c.service_host, bm_nodes.c.memory_mb, bm_nodes.c.cpus,
+ bm_nodes.c.local_gb, bm_nodes.c.deleted)\
+ .create(migrate_engine)
+
+ Index('idx_bm_interfaces_bm_node_id_deleted',
+ bm_interfaces.c.bm_node_id, bm_interfaces.c.deleted)\
+ .create(migrate_engine)
+
+ Index('idx_bm_pxe_ips_bm_node_id_deleted',
+ bm_pxe_ips.c.bm_node_id, bm_pxe_ips.c.deleted)\
+ .create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ pass
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 000000000..19071662c
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/baremetal/db/sqlalchemy/migration.py b/nova/virt/baremetal/db/sqlalchemy/migration.py
new file mode 100644
index 000000000..d91024a5e
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migration.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import distutils.version as dist_version
+import migrate
+from migrate.versioning import util as migrate_util
+import os
+import sqlalchemy
+
+from nova import config
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt.baremetal.db import migration
+from nova.virt.baremetal.db.sqlalchemy.session import get_engine
+
+LOG = logging.getLogger(__name__)
+
+
+@migrate_util.decorator
+def patched_with_engine(f, *a, **kw):
+ url = a[0]
+ engine = migrate_util.construct_engine(url, **kw)
+
+ try:
+ kw['engine'] = engine
+ return f(*a, **kw)
+ finally:
+ if isinstance(engine, migrate_util.Engine) and engine is not url:
+ migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
+ engine.dispose()
+
+
+# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
+# on that version or higher, this can be removed
+MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
+if (not hasattr(migrate, '__version__') or
+ dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
+ migrate_util.with_engine = patched_with_engine
+
+
+# NOTE(jkoelker) Delay importing migrate until we are patched
+from migrate import exceptions as versioning_exceptions
+from migrate.versioning import api as versioning_api
+from migrate.versioning.repository import Repository
+
+
+_REPOSITORY = None
+
+
+def db_sync(version=None):
+ if version is not None:
+ try:
+ version = int(version)
+ except ValueError:
+ raise exception.NovaException(_("version should be an integer"))
+
+ current_version = db_version()
+ repository = _find_migrate_repo()
+ if version is None or version > current_version:
+ return versioning_api.upgrade(get_engine(), repository, version)
+ else:
+ return versioning_api.downgrade(get_engine(), repository,
+ version)
+
+
+def db_version():
+ repository = _find_migrate_repo()
+ try:
+ return versioning_api.db_version(get_engine(), repository)
+ except versioning_exceptions.DatabaseNotControlledError:
+ meta = sqlalchemy.MetaData()
+ engine = get_engine()
+ meta.reflect(bind=engine)
+ tables = meta.tables
+ if len(tables) == 0:
+ db_version_control(migration.INIT_VERSION)
+ return versioning_api.db_version(get_engine(), repository)
+ else:
+ # Some pre-Essex DB's may not be version controlled.
+ # Require them to upgrade using Essex first.
+ raise exception.NovaException(
+ _("Upgrade DB using Essex release first."))
+
+
+def db_version_control(version=None):
+ repository = _find_migrate_repo()
+ versioning_api.version_control(get_engine(), repository, version)
+ return version
+
+
+def _find_migrate_repo():
+ """Get the path for the migrate repository."""
+ global _REPOSITORY
+ path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'migrate_repo')
+ assert os.path.exists(path)
+ if _REPOSITORY is None:
+ _REPOSITORY = Repository(path)
+ return _REPOSITORY
diff --git a/nova/virt/baremetal/db/sqlalchemy/models.py b/nova/virt/baremetal/db/sqlalchemy/models.py
new file mode 100644
index 000000000..c1ab191d0
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/models.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SQLAlchemy models for baremetal data.
+"""
+
+from sqlalchemy.orm import relationship, backref, object_mapper
+from sqlalchemy import Column, Integer, BigInteger, String, schema
+from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float, Index
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.schema import ForeignKeyConstraint
+
+from nova.db.sqlalchemy import models
+
+
+BASE = declarative_base()
+
+
+class BareMetalNode(BASE, models.NovaBase):
+ """Represents a bare metal node."""
+
+ __tablename__ = 'bm_nodes'
+ id = Column(Integer, primary_key=True)
+ service_host = Column(String(255))
+ instance_uuid = Column(String(36), nullable=True)
+ cpus = Column(Integer)
+ memory_mb = Column(Integer)
+ local_gb = Column(Integer)
+ pm_address = Column(Text)
+ pm_user = Column(Text)
+ pm_password = Column(Text)
+ prov_mac_address = Column(Text)
+ registration_status = Column(String(16))
+ task_state = Column(String(255))
+ prov_vlan_id = Column(Integer)
+ terminal_port = Column(Integer)
+
+
+class BareMetalPxeIp(BASE, models.NovaBase):
+ __tablename__ = 'bm_pxe_ips'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255), unique=True)
+ server_address = Column(String(255), unique=True)
+ bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'), nullable=True)
+
+
+class BareMetalInterface(BASE, models.NovaBase):
+ __tablename__ = 'bm_interfaces'
+ id = Column(Integer, primary_key=True)
+ bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'), nullable=True)
+ address = Column(String(255), unique=True)
+ datapath_id = Column(String(255))
+ port_no = Column(Integer)
+ vif_uuid = Column(String(36), unique=True)
+
+
+class BareMetalDeployment(BASE, models.NovaBase):
+ __tablename__ = 'bm_deployments'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ image_path = Column(String(255))
+ pxe_config_path = Column(String(255))
+ root_mb = Column(Integer)
+ swap_mb = Column(Integer)
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
new file mode 100644
index 000000000..2cae17f18
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -0,0 +1,58 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Session Handling for SQLAlchemy backend."""
+
+from nova import config
+from nova.db.sqlalchemy import session as nova_session
+from nova.openstack.common import cfg
+
+opts = [
+ cfg.StrOpt('baremetal_sql_connection',
+ default='sqlite:///$state_path/baremetal_$sqlite_db',
+ help='The SQLAlchemy connection string used to connect to the '
+ 'bare-metal database'),
+ ]
+
+CONF = config.CONF
+CONF.register_opts(opts)
+
+_ENGINE = None
+_MAKER = None
+
+
+def get_session(autocommit=True, expire_on_commit=False):
+ """Return a SQLAlchemy session."""
+ global _MAKER
+
+ if _MAKER is None:
+ engine = get_engine()
+ _MAKER = nova_session.get_maker(engine, autocommit, expire_on_commit)
+
+ session = _MAKER()
+ session = nova_session.wrap_session(session)
+ return session
+
+
+def get_engine():
+ """Return a SQLAlchemy engine."""
+ global _ENGINE
+ if _ENGINE is None:
+ _ENGINE = nova_session.create_engine(CONF.baremetal_sql_connection)
+ return _ENGINE
diff --git a/nova/virt/baremetal/doc/README.rst b/nova/virt/baremetal/doc/README.rst
new file mode 100644
index 000000000..6d5cfd466
--- /dev/null
+++ b/nova/virt/baremetal/doc/README.rst
@@ -0,0 +1,69 @@
+General Bare-metal Provisioning README
+======================================
+
+:Authors:
+ [USC/ISI] Mikyung Kang <mkkang@isi.edu>, David Kang <dkang@isi.edu>
+
+ [NTT DOCOMO] Ken Igarashi <igarashik@nttdocomo.co.jp>
+
+ [VirtualTech Japan Inc.] Arata Notsu <notsu@virtualtech.jp>
+:Date: 2012-08-02
+:Version: 2012.8
+:Wiki: http://wiki.openstack.org/GeneralBareMetalProvisioningFramework
+
+Code changes
+------------
+
+::
+
+ nova/nova/virt/baremetal/*
+ nova/nova/virt/driver.py
+ nova/nova/tests/baremetal/*
+ nova/nova/tests/compute/test_compute.py
+ nova/nova/compute/manager.py
+ nova/nova/compute/resource_tracker.py
+ nova/nova/manager.py
+ nova/nova/scheduler/driver.py
+ nova/nova/scheduler/filter_scheduler.py
+ nova/nova/scheduler/host_manager.py
+ nova/nova/scheduler/baremetal_host_manager.py
+ nova/bin/bm_deploy_server
+ nova/bin/nova-bm-manage
+
+Additional settings for bare-metal provisioning [nova.conf]
+-----------------------------------------------------------
+
+::
+
+ # baremetal database connection
+ baremetal_sql_connection = mysql://$ID:$Password@$IP/nova_bm
+
+ # baremetal compute driver
+ compute_driver = nova.virt.baremetal.driver.BareMetalDriver
+ baremetal_driver = {nova.virt.baremetal.tilera.TILERA | nova.virt.baremetal.pxe.PXE}
+ power_manager = {nova.virt.baremetal.tilera_pdu.Pdu | nova.virt.baremetal.ipmi.Ipmi}
+
+ # instance_type_extra_specs for this baremetal compute
+ instance_type_extra_specs = cpu_arch:{tilepro64 | x86_64 | arm}
+
+ # TFTP root
+ baremetal_tftp_root = /tftpboot
+
+ # baremetal scheduler host manager
+ scheduler_host_manager = nova.scheduler.baremetal_host_manager.BaremetalHostManager
+
+
+Non-PXE (Tilera) Bare-metal Provisioning
+----------------------------------------
+
+1. tilera-bm-instance-creation.rst
+
+2. tilera-bm-installation.rst
+
+PXE Bare-metal Provisioning
+---------------------------
+
+1. pxe-bm-instance-creation.rst
+
+2. pxe-bm-installation.rst
+
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
deleted file mode 100644
index 350506f73..000000000
--- a/nova/virt/baremetal/dom.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import power_state
-from nova import exception
-from nova import flags
-from nova.openstack.common import jsonutils
-from nova.openstack.common import log as logging
-from nova.virt.baremetal import nodes
-
-FLAGS = flags.FLAGS
-
-LOG = logging.getLogger(__name__)
-
-
-def read_domains(fname):
- try:
- f = open(fname, 'r')
- json = f.read()
- f.close()
- domains = jsonutils.loads(json)
- return domains
- except IOError:
- raise exception.NotFound()
-
-
-def write_domains(fname, domains):
- json = jsonutils.dumps(domains)
- f = open(fname, 'w')
- f.write(json)
- f.close()
-
-
-class BareMetalDom(object):
- """
- BareMetalDom class handles fake domain for bare metal back ends.
-
- This implements the singleton pattern.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalDom singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalDom, cls).__new__(cls)
- return cls._instance
-
- def __init__(self,
- fake_dom_file="/tftpboot/test_fake_dom_file"):
- """
- Only call __init__ the first time object is instantiated.
-
- Sets and Opens domain file: /tftpboot/test_fake_dom_file. Even though
- nova-compute service is rebooted, this file should retain the
- existing domains.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.fake_dom_file = fake_dom_file
- self.domains = []
- self.fake_dom_nums = 0
- self.baremetal_nodes = nodes.get_baremetal_nodes()
-
- self._read_domain_from_file()
-
- def _read_domain_from_file(self):
- """
- Reads the domains from a file.
- """
- try:
- self.domains = read_domains(self.fake_dom_file)
- except IOError:
- dom = []
- LOG.debug(_("No domains exist."))
- return
- msg = _("============= initial domains =========== : %s")
- LOG.debug(msg % (self.domains))
- for dom in self.domains[:]:
- if dom['status'] == power_state.BUILDING:
- LOG.debug(_("Building domain: to be removed"))
- self.destroy_domain(dom['name'])
- continue
- elif dom['status'] != power_state.RUNNING:
- LOG.debug(_("Not running domain: remove"))
- self.domains.remove(dom)
- continue
- res = self.baremetal_nodes.set_status(dom['node_id'],
- dom['status'])
- if res > 0:
- self.fake_dom_nums = self.fake_dom_nums + 1
- else:
- LOG.debug(_("domain running on an unknown node: discarded"))
- self.domains.remove(dom)
- continue
-
- LOG.debug(self.domains)
- self.store_domain()
-
- def reboot_domain(self, name):
- """
- Finds domain and deactivates (power down) bare-metal node.
-
- Activates the node again. In case of fail,
- destroys the domain from domains list.
- """
- fd = self.find_domain(name)
- if fd == []:
- msg = _("No such domain (%s)")
- raise exception.NotFound(msg % name)
- node_ip = self.baremetal_nodes.get_ip_by_id(fd['node_id'])
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
- except Exception:
- msg = _("Failed power down Bare-metal node %s")
- raise exception.NotFound(msg % fd['node_id'])
- self.change_domain_state(name, power_state.BUILDING)
- try:
- state = self.baremetal_nodes.activate_node(fd['node_id'],
- node_ip, name, fd['mac_address'], fd['ip_address'])
- self.change_domain_state(name, state)
- return state
- except Exception:
- LOG.debug(_("deactivate -> activate fails"))
- self.destroy_domain(name)
- raise
-
- def destroy_domain(self, name):
- """
- Removes domain from domains list and deactivates node.
- """
- fd = self.find_domain(name)
- if fd == []:
- LOG.debug(_("destroy_domain: no such domain"))
- msg = _("No such domain %s")
- raise exception.NotFound(msg % name)
-
- try:
- self.baremetal_nodes.deactivate_node(fd['node_id'])
-
- self.domains.remove(fd)
- msg = _("Domains: %s")
- LOG.debug(msg % (self.domains))
- self.store_domain()
- msg = _("After storing domains: %s")
- LOG.debug(msg % (self.domains))
- except Exception:
- LOG.debug(_("deactivation/removing domain failed"))
- raise
-
- def create_domain(self, xml_dict, bpath):
- """
- Adds a domain to domains list and activates an idle bare-metal node.
- """
- LOG.debug(_("===== Domain is being created ====="))
- fd = self.find_domain(xml_dict['name'])
- if fd != []:
- msg = _("Same domain name already exists")
- raise exception.NotFound(msg)
- LOG.debug(_("create_domain: before get_idle_node"))
-
- node_id = self.baremetal_nodes.get_idle_node()
- node_ip = self.baremetal_nodes.get_ip_by_id(node_id)
-
- new_dom = {'node_id': node_id,
- 'name': xml_dict['name'],
- 'memory_kb': xml_dict['memory_kb'],
- 'vcpus': xml_dict['vcpus'],
- 'mac_address': xml_dict['mac_address'],
- 'user_data': xml_dict['user_data'],
- 'ip_address': xml_dict['ip_address'],
- 'image_id': xml_dict['image_id'],
- 'kernel_id': xml_dict['kernel_id'],
- 'ramdisk_id': xml_dict['ramdisk_id'],
- 'status': power_state.BUILDING}
- self.domains.append(new_dom)
- msg = _("Created new domain: %s")
- LOG.debug(msg % (new_dom))
- self.change_domain_state(new_dom['name'], power_state.BUILDING)
-
- self.baremetal_nodes.set_image(bpath, node_id)
-
- state = power_state.NOSTATE
- try:
- state = self.baremetal_nodes.activate_node(node_id,
- node_ip, new_dom['name'], new_dom['mac_address'],
- new_dom['ip_address'], new_dom['user_data'])
- self.change_domain_state(new_dom['name'], state)
- except Exception:
- self.domains.remove(new_dom)
- self.baremetal_nodes.free_node(node_id)
- LOG.debug(_("Failed to boot Bare-metal node %s"), node_id)
- return state
-
- def change_domain_state(self, name, state):
- """
- Changes domain state by the given state and updates domain file.
- """
- l = self.find_domain(name)
- if l == []:
- msg = _("No such domain exists")
- raise exception.NotFound(msg)
- i = self.domains.index(l)
- self.domains[i]['status'] = state
- LOG.debug(_("change_domain_state: to new state %s"), str(state))
- self.store_domain()
-
- def store_domain(self):
- """
- Stores fake domains to the file.
- """
- msg = _("Stored fake domains to the file: %s")
- LOG.debug(msg % (self.domains))
- write_domains(self.fake_dom_file, self.domains)
-
- def find_domain(self, name):
- """
- Finds domain by the given name and returns the domain.
- """
- for item in self.domains:
- if item['name'] == name:
- return item
- LOG.debug(_("domain does not exist"))
- return []
-
- def list_domains(self):
- """
- Returns the instance name from domains list.
- """
- if self.domains == []:
- return []
- return [x['name'] for x in self.domains]
-
- def get_domain_info(self, instance_name):
- """
- Finds domain by the given instance_name and returns informaiton.
-
- For example, status, memory_kb, vcpus, etc.
- """
- domain = self.find_domain(instance_name)
- if domain != []:
- return [domain['status'], domain['memory_kb'],
- domain['memory_kb'],
- domain['vcpus'],
- 100]
- else:
- return [power_state.NOSTATE, '', '', '', '']
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
deleted file mode 100644
index ee183584c..000000000
--- a/nova/virt/baremetal/driver.py
+++ /dev/null
@@ -1,742 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-"""
-A connection to a hypervisor through baremetal.
-
-**Related Flags**
-
-:baremetal_type: Baremetal domain type.
-:baremetal_uri: Override for the default baremetal URI (baremetal_type).
-:rescue_image_id: Rescue ami image (default: ami-rescue).
-:rescue_kernel_id: Rescue aki image (default: aki-rescue).
-:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
-:injected_network_template: Template file for injected network
-:allow_project_net_traffic: Whether to allow in project network traffic
-
-"""
-
-import hashlib
-import os
-import shutil
-
-from nova.compute import instance_types
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import context as nova_context
-from nova import exception
-from nova import flags
-from nova import notifications
-from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import lockutils
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import dom
-from nova.virt.baremetal import nodes
-from nova.virt.disk import api as disk
-from nova.virt import driver
-from nova.virt.libvirt import utils as libvirt_utils
-
-
-Template = None
-
-LOG = logging.getLogger(__name__)
-
-FLAGS = flags.FLAGS
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_type',
- default='baremetal',
- help='baremetal domain type'),
- ]
-
-FLAGS.register_opts(baremetal_opts)
-
-
-def _late_load_cheetah():
- global Template
- if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(),
- ['Template'], -1)
- Template = t.Template
-
-
-class BareMetalDriver(driver.ComputeDriver):
-
- def __init__(self, virtapi, read_only):
- _late_load_cheetah()
- # Note that baremetal doesn't have a read-only connection
- # mode, so the read_only parameter is ignored
- super(BareMetalDriver, self).__init__(virtapi)
- self.baremetal_nodes = nodes.get_baremetal_nodes()
- self._wrapped_conn = None
- self._host_state = None
-
- @property
- def HostState(self):
- if not self._host_state:
- self._host_state = HostState(self)
- return self._host_state
-
- def init_host(self, host):
- pass
-
- def _get_connection(self):
- self._wrapped_conn = dom.BareMetalDom()
- return self._wrapped_conn
- _conn = property(_get_connection)
-
- def get_pty_for_instance(self, instance_name):
- raise NotImplementedError()
-
- def list_instances(self):
- return self._conn.list_domains()
-
- def destroy(self, instance, network_info, block_device_info=None,
- cleanup=True):
- while True:
- try:
- self._conn.destroy_domain(instance['name'])
- break
- except Exception as ex:
- LOG.debug(_("Error encountered when destroying instance "
- "'%(name)s': %(ex)s") %
- {"name": instance["name"], "ex": ex},
- instance=instance)
- break
-
- if cleanup:
- self._cleanup(instance)
-
- return True
-
- def _cleanup(self, instance):
- target = os.path.join(FLAGS.instances_path, instance['name'])
- instance_name = instance['name']
- LOG.info(_('instance %(instance_name)s: deleting instance files'
- ' %(target)s') % locals(), instance=instance)
- if FLAGS.baremetal_type == 'lxc':
- disk.destroy_container(self.container)
- if os.path.exists(target):
- shutil.rmtree(target)
-
- @exception.wrap_exception
- def attach_volume(self, instance_name, device_path, mountpoint):
- raise exception.Invalid("attach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def detach_volume(self, instance_name, mountpoint):
- raise exception.Invalid("detach_volume not supported for baremetal.")
-
- @exception.wrap_exception
- def snapshot(self, instance, image_id):
- raise exception.Invalid("snapshot not supported for baremetal.")
-
- @exception.wrap_exception
- def reboot(self, instance):
- timer = utils.LoopingCall(f=None)
-
- def _wait_for_reboot():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_reboot failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_reboot
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def rescue(self, context, instance, network_info, rescue_password):
- """Loads a VM using rescue images.
-
- A rescue is normally performed when something goes wrong with the
- primary images and data needs to be corrected/recovered. Rescuing
- should not edit or over-ride the original image, only allow for
- data recovery.
-
- """
- self.destroy(instance, False)
-
- rescue_images = {'image_id': FLAGS.baremetal_rescue_image_id,
- 'kernel_id': FLAGS.baremetal_rescue_kernel_id,
- 'ramdisk_id': FLAGS.baremetal_rescue_ramdisk_id}
- self._create_image(instance, '.rescue', rescue_images,
- network_info=network_info)
-
- timer = utils.LoopingCall(f=None)
-
- def _wait_for_rescue():
- try:
- state = self._conn.reboot_domain(instance['name'])
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'],
- instance=instance)
- timer.stop()
- except Exception:
- LOG.exception(_('_wait_for_rescue failed'), instance=instance)
- timer.stop()
- timer.f = _wait_for_rescue
- return timer.start(interval=0.5).wait()
-
- @exception.wrap_exception
- def unrescue(self, instance, network_info):
- """Reboot the VM which is being rescued back into primary images.
-
- Because reboot destroys and re-creates instances, unresue should
- simply call reboot.
-
- """
- self.reboot(instance)
-
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None):
- LOG.debug(_("<============= spawn of baremetal =============>"))
-
- def basepath(fname='', suffix=''):
- return os.path.join(FLAGS.instances_path,
- instance['name'],
- fname + suffix)
- bpath = basepath(suffix='')
- timer = utils.LoopingCall(f=None)
-
- xml_dict = self.to_xml_dict(instance, network_info)
- self._create_image(context, instance, xml_dict,
- network_info=network_info,
- block_device_info=block_device_info)
- LOG.debug(_("instance %s: is building"), instance['name'],
- instance=instance)
- LOG.debug(xml_dict, instance=instance)
-
- def _wait_for_boot():
- try:
- LOG.debug(_("Key is injected but instance is not running yet"),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.BUILDING})
- notifications.send_update(context, old_ref, new_ref)
-
- state = self._conn.create_domain(xml_dict, bpath)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'],
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ACTIVE})
- notifications.send_update(context, old_ref, new_ref)
-
- LOG.debug(_('~~~~~~ current state = %s ~~~~~~'), state,
- instance=instance)
- LOG.debug(_("instance %s spawned successfully"),
- instance['name'], instance=instance)
- else:
- LOG.debug(_('instance %s:not booted'), instance['name'],
- instance=instance)
- except Exception:
- LOG.exception(_("Baremetal assignment is overcommitted."),
- instance=instance)
- (old_ref, new_ref) = self.virtapi.instance_update(
- context, instance['uuid'],
- {'vm_state': vm_states.ERROR,
- 'power_state': power_state.FAILED})
- notifications.send_update(context, old_ref, new_ref)
-
- timer.stop()
- timer.f = _wait_for_boot
-
- return timer.start(interval=0.5).wait()
-
- def get_console_output(self, instance):
- console_log = os.path.join(FLAGS.instances_path, instance['name'],
- 'console.log')
-
- libvirt_utils.chown(console_log, os.getuid())
-
- fd = self._conn.find_domain(instance['name'])
-
- self.baremetal_nodes.get_console_output(console_log, fd['node_id'])
-
- fpath = console_log
-
- return libvirt_utils.load_file(fpath)
-
- @exception.wrap_exception
- def get_ajax_console(self, instance):
- raise NotImplementedError()
-
- @exception.wrap_exception
- def get_vnc_console(self, instance):
- raise NotImplementedError()
-
- @staticmethod
- def _cache_image(fetch_func, target, fname, cow=False, *args, **kwargs):
- """Wrapper for a method that creates an image that caches the image.
-
- This wrapper will save the image into a common store and create a
- copy for use by the hypervisor.
-
- The underlying method should specify a kwarg of target representing
- where the image will be saved.
-
- fname is used as the filename of the base image. The filename needs
- to be unique to a given image.
-
- If cow is True, it will make a CoW image instead of a copy.
- """
- if not os.path.exists(target):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
- if not os.path.exists(base_dir):
- fileutils.ensure_tree(base_dir)
- base = os.path.join(base_dir, fname)
-
- @lockutils.synchronized(fname, 'nova-')
- def call_if_not_exists(base, fetch_func, *args, **kwargs):
- if not os.path.exists(base):
- fetch_func(target=base, *args, **kwargs)
-
- call_if_not_exists(base, fetch_func, *args, **kwargs)
-
- if cow:
- libvirt_utils.create_cow_image(base, target)
- else:
- libvirt_utils.copy_image(base, target)
-
- def _create_image(self, context, inst, xml, suffix='',
- disk_images=None, network_info=None,
- block_device_info=None):
- if not suffix:
- suffix = ''
-
- # syntactic nicety
- def basepath(fname='', suffix=suffix):
- return os.path.join(FLAGS.instances_path,
- inst['name'],
- fname + suffix)
-
- # ensure directories exist and are writable
- fileutils.ensure_tree(basepath(suffix=''))
- utils.execute('chmod', '0777', basepath(suffix=''))
-
- LOG.info(_('instance %s: Creating image'), inst['name'],
- instance=inst)
-
- if FLAGS.baremetal_type == 'lxc':
- container_dir = '%s/rootfs' % basepath(suffix='')
- fileutils.ensure_tree(container_dir)
-
- # NOTE(vish): No need add the suffix to console.log
- libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
-
- if not disk_images:
- disk_images = {'image_id': inst['image_ref'],
- 'kernel_id': inst['kernel_id'],
- 'ramdisk_id': inst['ramdisk_id']}
-
- if disk_images['kernel_id']:
- fname = disk_images['kernel_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('kernel'),
- fname=fname,
- cow=False,
- image_id=disk_images['kernel_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
- if disk_images['ramdisk_id']:
- fname = disk_images['ramdisk_id']
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('ramdisk'),
- fname=fname,
- cow=False,
- image_id=disk_images['ramdisk_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
- size = inst['root_gb'] * 1024 * 1024 * 1024
-
- inst_type_id = inst['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
- if inst_type['name'] == 'm1.tiny' or suffix == '.rescue':
- size = None
- root_fname += "_sm"
- else:
- root_fname += "_%d" % inst['root_gb']
-
- self._cache_image(fetch_func=libvirt_utils.fetch_image,
- context=context,
- target=basepath('root'),
- fname=root_fname,
- cow=False, # FLAGS.use_cow_images,
- image_id=disk_images['image_id'],
- user_id=inst['user_id'],
- project_id=inst['project_id'])
-
- # For now, we assume that if we're not using a kernel, we're using a
- # partitioned disk image where the target partition is the first
- # partition
- target_partition = None
- if not inst['kernel_id']:
- target_partition = "1"
-
- if FLAGS.baremetal_type == 'lxc':
- target_partition = None
-
- if inst['key_data']:
- key = str(inst['key_data'])
- else:
- key = None
- net = None
-
- nets = []
- ifc_template = open(FLAGS.injected_network_template).read()
- ifc_num = -1
- have_injected_networks = False
- admin_context = nova_context.get_admin_context()
- for (network_ref, mapping) in network_info:
- ifc_num += 1
-
- if not network_ref['injected']:
- continue
-
- have_injected_networks = True
- address = mapping['ips'][0]['ip']
- netmask = mapping['ips'][0]['netmask']
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if FLAGS.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- net_info = {'name': 'eth%d' % ifc_num,
- 'address': address,
- 'netmask': netmask,
- 'gateway': mapping['gateway'],
- 'broadcast': mapping['broadcast'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6}
- nets.append(net_info)
-
- if have_injected_networks:
- net = str(Template(ifc_template,
- searchList=[{'interfaces': nets,
- 'use_ipv6': FLAGS.use_ipv6}]))
-
- metadata = inst.get('metadata')
- if any((key, net, metadata)):
- inst_name = inst['name']
-
- injection_path = basepath('root')
- img_id = inst['image_ref']
-
- for injection in ('metadata', 'key', 'net'):
- if locals()[injection]:
- LOG.info(_('instance %(inst_name)s: injecting '
- '%(injection)s into image %(img_id)s'),
- locals(), instance=inst)
- try:
- disk.inject_data(injection_path, key, net, metadata,
- partition=target_partition,
- use_cow=False) # FLAGS.use_cow_images
-
- except Exception as e:
- # This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
- ' data into image %(img_id)s (%(e)s)') % locals(),
- instance=inst)
-
- def _prepare_xml_info(self, instance, network_info, rescue,
- block_device_info=None):
- # block_device_mapping = driver.block_device_info_get_mapping(
- # block_device_info)
- _map = 0
- for (_, mapping) in network_info:
- _map += 1
-
- nics = []
- # FIXME(vish): stick this in db
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
-
- driver_type = 'raw'
-
- xml_info = {'type': FLAGS.baremetal_type,
- 'name': instance['name'],
- 'basepath': os.path.join(FLAGS.instances_path,
- instance['name']),
- 'memory_kb': inst_type['memory_mb'] * 1024,
- 'vcpus': inst_type['vcpus'],
- 'rescue': rescue,
- 'driver_type': driver_type,
- 'nics': nics,
- 'ip_address': mapping['ips'][0]['ip'],
- 'mac_address': mapping['mac'],
- 'user_data': instance['user_data'],
- 'image_id': instance['image_ref'],
- 'kernel_id': instance['kernel_id'],
- 'ramdisk_id': instance['ramdisk_id']}
-
- if not rescue:
- if instance['kernel_id']:
- xml_info['kernel'] = xml_info['basepath'] + "/kernel"
-
- if instance['ramdisk_id']:
- xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
-
- xml_info['disk'] = xml_info['basepath'] + "/disk"
- return xml_info
-
- def to_xml_dict(self, instance, rescue=False, network_info=None):
- LOG.debug(_('instance %s: starting toXML method'), instance['name'],
- instance=instance)
- xml_info = self._prepare_xml_info(instance, rescue, network_info)
- LOG.debug(_('instance %s: finished toXML method'), instance['name'],
- instance=instance)
- return xml_info
-
- def get_info(self, instance):
- """Retrieve information from baremetal for a specific instance name.
-
- If a baremetal error is encountered during lookup, we might raise a
- NotFound exception or Error exception depending on how severe the
- baremetal error is.
-
- """
- _domain_info = self._conn.get_domain_info(instance['name'])
- state, max_mem, mem, num_cpu, cpu_time = _domain_info
- return {'state': state,
- 'max_mem': max_mem,
- 'mem': mem,
- 'num_cpu': num_cpu,
- 'cpu_time': cpu_time}
-
- def _create_new_domain(self, persistent=True, launch_flags=0):
- raise NotImplementedError()
-
- def get_diagnostics(self, instance_name):
- # diagnostics are not supported for baremetal
- raise NotImplementedError()
-
- def get_disks(self, instance_name):
- raise NotImplementedError()
-
- def get_interfaces(self, instance_name):
- raise NotImplementedError()
-
- def get_vcpu_total(self):
- """Get vcpu number of physical computer.
-
- :returns: the number of cpu core.
-
- """
-
- # On certain platforms, this will raise a NotImplementedError.
- try:
- return self.baremetal_nodes.get_hw_info('vcpus')
- except NotImplementedError:
- LOG.warn(_("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "
- "This error can be safely ignored for now."))
- return False
-
- def get_memory_mb_total(self):
- """Get the total memory size(MB) of physical computer.
-
- :returns: the total amount of memory(MB).
-
- """
- return self.baremetal_nodes.get_hw_info('memory_mb')
-
- def get_local_gb_total(self):
- """Get the total hdd size(GB) of physical computer.
-
- :returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
- return self.baremetal_nodes.get_hw_info('local_gb')
-
- def get_vcpu_used(self):
- """ Get vcpu usage number of physical computer.
-
- :returns: The total number of vcpu that currently used.
-
- """
- return len(self._conn.list_domains())
-
- def get_memory_mb_used(self):
- """Get the free memory size(MB) of physical computer.
-
- :returns: the total usage of memory(MB).
-
- """
- return self.baremetal_nodes.get_hw_info('memory_mb_used')
-
- def get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
-
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
- return self.baremetal_nodes.get_hw_info('local_gb_used')
-
- def get_hypervisor_type(self):
- """Get hypervisor type.
-
- :returns: hypervisor type (ex. qemu)
-
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_type')
-
- def get_hypervisor_version(self):
- """Get hypervisor version.
-
- :returns: hypervisor version (ex. 12003)
-
- """
- return self.baremetal_nodes.get_hw_info('hypervisor_version')
-
- def get_cpu_info(self):
- """Get cpuinfo information.
-
- Obtains cpu feature from virConnect.getCapabilities,
- and returns as a json string.
-
- :return: see above description
-
- """
- return self.baremetal_nodes.get_hw_info('cpu_info')
-
- def block_stats(self, instance_name, disk):
- raise NotImplementedError()
-
- def interface_stats(self, instance_name, interface):
- raise NotImplementedError()
-
- def get_console_pool_info(self, console_type):
- #TODO(mdragon): console proxy should be implemented for baremetal,
- # in case someone wants to use it.
- # For now return fake data.
- return {'address': '127.0.0.1',
- 'username': 'fakeuser',
- 'password': 'fakepassword'}
-
- def refresh_security_group_rules(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
-
- def refresh_security_group_members(self, security_group_id):
- # Bare metal doesn't currently support security groups
- pass
-
- def refresh_instance_security_rules(self, instance):
- # Bare metal doesn't currently support security groups
- pass
-
- def get_available_resource(self):
- """Updates compute manager resource info on ComputeNode table.
-
- This method is called when nova-coompute launches, and
- whenever admin executes "nova-manage service update_resource".
- """
-
- # Updating host information
- dic = {'vcpus': self.get_vcpu_total(),
- 'memory_mb': self.get_memory_mb_total(),
- 'local_gb': self.get_local_gb_total(),
- 'vcpus_used': self.get_vcpu_used(),
- 'memory_mb_used': self.get_memory_mb_used(),
- 'local_gb_used': self.get_local_gb_used(),
- 'hypervisor_type': self.get_hypervisor_type(),
- 'hypervisor_version': self.get_hypervisor_version(),
- 'hypervisor_hostname': FLAGS.host,
- 'cpu_info': self.get_cpu_info(),
- 'cpu_arch': FLAGS.cpu_arch}
-
- LOG.info(_('#### RLK: cpu_arch = %s ') % FLAGS.cpu_arch)
- return dic
-
- def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
- raise NotImplementedError()
-
- def live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
- raise NotImplementedError()
-
- def unfilter_instance(self, instance_ref):
- """See comments of same method in firewall_driver."""
- pass
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first."""
- LOG.debug(_("Updating!"))
- return self.HostState.get_host_stats(refresh=refresh)
-
-
-class HostState(object):
- """Manages information about the XenServer host this compute
- node is running on.
- """
-
- def __init__(self, connection):
- super(HostState, self).__init__()
- self.connection = connection
- self._stats = {}
- self.update_status()
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host. If 'refresh' is
- True, run the update first.
- """
- if refresh:
- self.update_status()
- return self._stats
-
- def update_status(self):
- """
- We can get host status information.
- """
- LOG.debug(_("Updating host stats"))
- data = {}
- data["vcpus"] = self.connection.get_vcpu_total()
- data["vcpus_used"] = self.connection.get_vcpu_used()
- data["cpu_info"] = self.connection.get_cpu_info()
- data["cpu_arch"] = FLAGS.cpu_arch
- data["disk_total"] = self.connection.get_local_gb_total()
- data["disk_used"] = self.connection.get_local_gb_used()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
- data["host_memory_total"] = self.connection.get_memory_mb_total()
- data["host_memory_free"] = (data["host_memory_total"] -
- self.connection.get_memory_mb_used())
- data["hypervisor_type"] = self.connection.get_hypervisor_type()
- data["hypervisor_version"] = self.connection.get_hypervisor_version()
- data["hypervisor_hostname"] = FLAGS.host
- self._stats = data
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
deleted file mode 100644
index 635089366..000000000
--- a/nova/virt/baremetal/fake.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
-
-
-class BareMetalNodes(object):
- """
- This manages node information and implements singleton.
-
- BareMetalNodes class handles machine architectures of interest to
- technical computing users have either poor or non-existent support
- for virtualization.
- """
-
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- return "fake"
-
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
-
- Returns 1 if the node is in the nodes list.
- """
- return True
-
- def get_status(self):
- """
- Gets status of the given node.
- """
- pass
-
- def get_idle_node(self):
- """
- Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
- """
- return False
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- return "127.0.0.1"
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- return False
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
- pass
-
- def deactivate_node(self, node_id):
- """
- Deactivates the given node by turnning it off.
- """
- pass
-
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
-
- User can access the bare-metal node using ssh.
- """
- pass
-
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
- """
- pass
-
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- pass
-
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
-
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
- pass
-
- def sleep_mgr(self, time):
- """
- Sleeps until the node is activated.
- """
- pass
-
- def ssh_set(self, node_ip):
- """
- Sets and Runs sshd in the node.
- """
- pass
-
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address):
- """
- Activates the given node using ID, IP, and MAC address.
- """
- pass
-
- def get_console_output(self, console_log):
- """
- Gets console output of the given node.
- """
- pass
-
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
-
- Noting to do for tilera nodes: actual image is used.
- """
- pass
-
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
-
- This should be done after ssh key is injected.
- """
- pass
diff --git a/nova/virt/baremetal/nodes.py b/nova/virt/baremetal/nodes.py
deleted file mode 100644
index b4f2a50e2..000000000
--- a/nova/virt/baremetal/nodes.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.virt.baremetal import fake
-from nova.virt.baremetal import tilera
-
-FLAGS = flags.FLAGS
-
-baremetal_opts = [
- cfg.StrOpt('baremetal_driver',
- default='tilera',
- help='Bare-metal driver runs on')
- ]
-
-FLAGS.register_opts(baremetal_opts)
-
-
-def get_baremetal_nodes():
- d = FLAGS.baremetal_driver
- if d == 'tilera':
- return tilera.get_baremetal_nodes()
- elif d == 'fake':
- return fake.get_baremetal_nodes()
- else:
- raise exception.NovaException(_("Unknown baremetal driver %(d)s"))
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
deleted file mode 100644
index 4d4a37007..000000000
--- a/nova/virt/baremetal/tilera.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 University of Southern California
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tilera back-end for bare-metal compute node provisioning
-
-The details of this implementation are specific to ISI's testbed. This code
-is provided here as an example of how to implement a backend.
-"""
-
-import base64
-import subprocess
-import time
-
-from nova.compute import power_state
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova import utils
-
-FLAGS = flags.FLAGS
-
-tilera_opts = [
- cfg.StrOpt('tile_monitor',
- default='/usr/local/TileraMDE/bin/tile-monitor',
- help='Tilera command line program for Bare-metal driver')
- ]
-
-FLAGS.register_opts(tilera_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-def get_baremetal_nodes():
- return BareMetalNodes()
-
-
-class BareMetalNodes(object):
- """
- This manages node information and implements singleton.
-
- BareMetalNodes class handles machine architectures of interest to
- technical computing users have either poor or non-existent support
- for virtualization.
- """
-
- _instance = None
- _is_init = False
-
- def __new__(cls, *args, **kwargs):
- """
- Returns the BareMetalNodes singleton.
- """
- if not cls._instance or ('new' in kwargs and kwargs['new']):
- cls._instance = super(BareMetalNodes, cls).__new__(cls)
- return cls._instance
-
- def __init__(self, file_name="/tftpboot/tilera_boards"):
- """
- Only call __init__ the first time object is instantiated.
-
- From the bare-metal node list file: /tftpboot/tilera_boards,
- reads each item of each node such as node ID, IP address,
- MAC address, vcpus, memory, hdd, hypervisor type/version, and cpu
- and appends each node information into nodes list.
- """
- if self._is_init:
- return
- self._is_init = True
-
- self.nodes = []
- self.BOARD_ID = 0
- self.IP_ADDR = 1
- self.MAC_ADDR = 2
- self.VCPUS = 3
- self.MEMORY_MB = 4
- self.LOCAL_GB = 5
- self.MEMORY_MB_USED = 6
- self.LOCAL_GB_USED = 7
- self.HYPERVISOR_TYPE = 8
- self.HYPERVISOR_VER = 9
- self.CPU_INFO = 10
-
- fp = open(file_name, "r")
- for item in fp:
- l = item.split()
- if l[0] == '#':
- continue
- l_d = {'node_id': int(l[self.BOARD_ID]),
- 'ip_addr': l[self.IP_ADDR],
- 'mac_addr': l[self.MAC_ADDR],
- 'status': power_state.NOSTATE,
- 'vcpus': int(l[self.VCPUS]),
- 'memory_mb': int(l[self.MEMORY_MB]),
- 'local_gb': int(l[self.LOCAL_GB]),
- 'memory_mb_used': int(l[self.MEMORY_MB_USED]),
- 'local_gb_used': int(l[self.LOCAL_GB_USED]),
- 'hypervisor_type': l[self.HYPERVISOR_TYPE],
- 'hypervisor_version': int(l[self.HYPERVISOR_VER]),
- 'cpu_info': l[self.CPU_INFO]}
- self.nodes.append(l_d)
- fp.close()
-
- def get_hw_info(self, field):
- """
- Returns hardware information of bare-metal node by the given field.
-
- Given field can be vcpus, memory_mb, local_gb, memory_mb_used,
- local_gb_used, hypervisor_type, hypervisor_version, and cpu_info.
- """
- for node in self.nodes:
- if node['node_id'] == 9:
- if field == 'vcpus':
- return node['vcpus']
- elif field == 'memory_mb':
- return node['memory_mb']
- elif field == 'local_gb':
- return node['local_gb']
- elif field == 'memory_mb_used':
- return node['memory_mb_used']
- elif field == 'local_gb_used':
- return node['local_gb_used']
- elif field == 'hypervisor_type':
- return node['hypervisor_type']
- elif field == 'hypervisor_version':
- return node['hypervisor_version']
- elif field == 'cpu_info':
- return node['cpu_info']
-
- def set_status(self, node_id, status):
- """
- Sets status of the given node by the given status.
-
- Returns 1 if the node is in the nodes list.
- """
- for node in self.nodes:
- if node['node_id'] == node_id:
- node['status'] = status
- return True
- return False
-
- def get_status(self):
- """
- Gets status of the given node.
- """
- pass
-
- def get_idle_node(self):
- """
- Gets an idle node, sets the status as 1 (RUNNING) and Returns node ID.
- """
- for item in self.nodes:
- if item['status'] == 0:
- item['status'] = 1 # make status RUNNING
- return item['node_id']
- raise exception.NotFound("No free nodes available")
-
- def get_ip_by_id(self, id):
- """
- Returns default IP address of the given node.
- """
- for item in self.nodes:
- if item['node_id'] == id:
- return item['ip_addr']
-
- def free_node(self, node_id):
- """
- Sets/frees status of the given node as 0 (IDLE).
- """
- LOG.debug(_("free_node...."))
- for item in self.nodes:
- if item['node_id'] == str(node_id):
- item['status'] = 0 # make status IDLE
-
- def power_mgr(self, node_id, mode):
- """
- Changes power state of the given node.
-
- According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
- changed. /tftpboot/pdu_mgr script handles power management of
- PDU (Power Distribution Unit).
- """
- if node_id < 5:
- pdu_num = 1
- pdu_outlet_num = node_id + 5
- else:
- pdu_num = 2
- pdu_outlet_num = node_id
- path1 = "10.0.100." + str(pdu_num)
- utils.execute('/tftpboot/pdu_mgr', path1, str(pdu_outlet_num),
- str(mode), '>>', 'pdu_output')
-
- def deactivate_node(self, node_id):
- """
- Deactivates the given node by turnning it off.
-
- /tftpboot/fs_x directory is a NFS of node#x
- and /tftpboot/root_x file is an file system image of node#x.
- """
- node_ip = self.get_ip_by_id(node_id)
- LOG.debug(_("deactivate_node is called for "
- "node_id = %(id)s node_ip = %(ip)s"),
- {'id': str(node_id), 'ip': node_ip})
- for item in self.nodes:
- if item['node_id'] == node_id:
- LOG.debug(_("status of node is set to 0"))
- item['status'] = 0
- self.power_mgr(node_id, 2)
- self.sleep_mgr(5)
- path = "/tftpboot/fs_" + str(node_id)
- pathx = "/tftpboot/root_" + str(node_id)
- utils.execute('sudo', '/usr/sbin/rpc.mountd')
- try:
- utils.execute('sudo', 'umount', '-f', pathx)
- utils.execute('sudo', 'rm', '-f', pathx)
- except Exception:
- LOG.debug(_("rootfs is already removed"))
-
- def network_set(self, node_ip, mac_address, ip_address):
- """
- Sets network configuration based on the given ip and mac address.
-
- User can access the bare-metal node using ssh.
- """
- cmd = (FLAGS.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "ifconfig xgbe0 hw ether " + mac_address +
- " - --wait --run - ifconfig xgbe0 " + ip_address +
- " - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- #utils.execute(cmd, shell=True)
- self.sleep_mgr(5)
-
- def iptables_set(self, node_ip, user_data):
- """
- Sets security setting (iptables:port) if needed.
-
- iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
- /tftpboot/iptables_rule script sets iptables rule on the given node.
- """
- if user_data != '':
- open_ip = base64.b64decode(user_data)
- utils.execute('/tftpboot/iptables_rule', node_ip, open_ip)
-
- def check_activated(self, node_id, node_ip):
- """
- Checks whether the given node is activated or not.
- """
- LOG.debug(_("Before ping to the bare-metal node"))
- tile_output = "/tftpboot/tile_output_" + str(node_id)
- grep_cmd = ("ping -c1 " + node_ip + " | grep Unreachable > " +
- tile_output)
- subprocess.Popen(grep_cmd, shell=True)
- self.sleep_mgr(5)
-
- file = open(tile_output, "r")
- out_msg = file.readline().find("Unreachable")
- utils.execute('sudo', 'rm', tile_output)
- if out_msg == -1:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"),
- locals())
- return True
- else:
- LOG.debug(_("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
- " out_msg=%(out_msg)s"), locals())
- self.power_mgr(node_id, 2)
- return False
-
- def vmlinux_set(self, node_id, mode):
- """
- Sets kernel into default path (/tftpboot) if needed.
-
- From basepath to /tftpboot, kernel is set based on the given mode
- such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
- """
- LOG.debug(_("Noting to do for tilera nodes: vmlinux is in CF"))
-
- def sleep_mgr(self, time_in_seconds):
- """
- Sleeps until the node is activated.
- """
- time.sleep(time_in_seconds)
-
- def ssh_set(self, node_ip):
- """
- Sets and Runs sshd in the node.
- """
- cmd = (FLAGS.tile_monitor +
- " --resume --net " + node_ip + " --run - " +
- "/usr/sbin/sshd - --wait --quit")
- subprocess.Popen(cmd, shell=True)
- self.sleep_mgr(5)
-
- def activate_node(self, node_id, node_ip, name, mac_address,
- ip_address, user_data):
- """
- Activates the given node using ID, IP, and MAC address.
- """
- LOG.debug(_("activate_node"))
-
- self.power_mgr(node_id, 2)
- self.power_mgr(node_id, 3)
- self.sleep_mgr(100)
-
- try:
- self.check_activated(node_id, node_ip)
- self.network_set(node_ip, mac_address, ip_address)
- self.ssh_set(node_ip)
- self.iptables_set(node_ip, user_data)
- return power_state.RUNNING
- except Exception as ex:
- self.deactivate_node(node_id)
- raise exception.NovaException(_("Node is unknown error state."))
-
- def get_console_output(self, console_log, node_id):
- """
- Gets console output of the given node.
- """
- node_ip = self.get_ip_by_id(node_id)
- log_path = "/tftpboot/log_" + str(node_id)
- kmsg_cmd = (FLAGS.tile_monitor +
- " --resume --net " + node_ip +
- " -- dmesg > " + log_path)
- subprocess.Popen(kmsg_cmd, shell=True)
- self.sleep_mgr(5)
- utils.execute('cp', log_path, console_log)
-
- def get_image(self, bp):
- """
- Gets the bare-metal file system image into the instance path.
-
- Noting to do for tilera nodes: actual image is used.
- """
- path_fs = "/tftpboot/tilera_fs"
- path_root = bp + "/root"
- utils.execute('cp', path_fs, path_root)
-
- def set_image(self, bpath, node_id):
- """
- Sets the PXE bare-metal file system from the instance path.
-
- This should be done after ssh key is injected.
- /tftpboot/fs_x directory is a NFS of node#x.
- /tftpboot/root_x file is an file system image of node#x.
- """
- path1 = bpath + "/root"
- pathx = "/tftpboot/root_" + str(node_id)
- path2 = "/tftpboot/fs_" + str(node_id)
- utils.execute('sudo', 'mv', path1, pathx)
- utils.execute('sudo', 'mount', '-o', 'loop', pathx, path2)
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 7b4cb718b..fda9f201f 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -21,8 +21,8 @@ import os
import shutil
import tempfile
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
@@ -45,10 +45,14 @@ configdrive_opts = [
default=None,
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
+ cfg.StrOpt('mkisofs_cmd',
+ default='genisoimage',
+ help='Name and optionally path of the tool used for '
+ 'ISO image creation')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(configdrive_opts)
+CONF = config.CONF
+CONF.register_opts(configdrive_opts)
class ConfigDriveBuilder(object):
@@ -58,7 +62,7 @@ class ConfigDriveBuilder(object):
# TODO(mikal): I don't think I can use utils.tempdir here, because
# I need to have the directory last longer than the scope of this
# method call
- self.tempdir = tempfile.mkdtemp(dir=FLAGS.config_drive_tempdir,
+ self.tempdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_gen_')
if instance_md is not None:
@@ -78,7 +82,7 @@ class ConfigDriveBuilder(object):
{'filepath': path})
def _make_iso9660(self, path):
- utils.execute('genisoimage',
+ utils.execute(CONF.mkisofs_cmd,
'-o', path,
'-ldots',
'-allow-lowercase',
@@ -105,7 +109,7 @@ class ConfigDriveBuilder(object):
mounted = False
try:
- mountdir = tempfile.mkdtemp(dir=FLAGS.config_drive_tempdir,
+ mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir,
prefix='cd_mnt_')
_out, err = utils.trycmd('mount', '-o', 'loop', path, mountdir,
run_as_root=True)
@@ -133,13 +137,13 @@ class ConfigDriveBuilder(object):
shutil.rmtree(mountdir)
def make_drive(self, path):
- if FLAGS.config_drive_format == 'iso9660':
+ if CONF.config_drive_format == 'iso9660':
self._make_iso9660(path)
- elif FLAGS.config_drive_format == 'vfat':
+ elif CONF.config_drive_format == 'vfat':
self._make_vfat(path)
else:
raise exception.ConfigDriveUnknownFormat(
- format=FLAGS.config_drive_format)
+ format=CONF.config_drive_format)
def cleanup(self):
if self.imagefile:
@@ -152,7 +156,7 @@ class ConfigDriveBuilder(object):
def required_by(instance):
- return instance.get('config_drive') or FLAGS.force_config_drive
+ return instance.get('config_drive') or CONF.force_config_drive
def enabled_for(instance):
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index e113391a5..e34ca58b5 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -32,15 +32,15 @@ import tempfile
if os.name != 'nt':
import crypt
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
-from nova.virt.disk import guestfs
-from nova.virt.disk import loop
-from nova.virt.disk import nbd
+from nova.virt.disk.mount import guestfs
+from nova.virt.disk.mount import loop
+from nova.virt.disk.mount import nbd
from nova.virt import images
@@ -76,14 +76,14 @@ disk_opts = [
'The format is <os_type>=<mkfs command>'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(disk_opts)
+CONF = config.CONF
+CONF.register_opts(disk_opts)
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
-for s in FLAGS.virt_mkfs:
+for s in CONF.virt_mkfs:
# NOTE(yamahata): mkfs command may includes '=' for its options.
# So item.partition('=') doesn't work here
os_type, mkfs_command = s.split('=', 1)
@@ -156,13 +156,6 @@ def bind(src, target, instance_name):
utils.execute('touch', target, run_as_root=True)
utils.execute('mount', '-o', 'bind', src, target,
run_as_root=True)
- s = os.stat(src)
- cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
- os.minor(s.st_rdev))
- cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
- "%s/devices.allow" % instance_name)
- utils.execute('tee', cgroups_path,
- process_input=cgroup_info, run_as_root=True)
def unbind(target):
@@ -188,7 +181,7 @@ class _DiskImage(object):
# As a performance tweak, don't bother trying to
# directly loopback mount a cow image.
- self.handlers = FLAGS.img_handlers[:]
+ self.handlers = CONF.img_handlers[:]
if use_cow and 'loop' in self.handlers:
self.handlers.remove('loop')
@@ -235,7 +228,7 @@ class _DiskImage(object):
@staticmethod
def _handler_class(mode=None, device=None):
"""Look up the appropriate class to use based on MODE or DEVICE."""
- for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
+ for cls in (loop.LoopMount, nbd.NbdMount, guestfs.GuestFSMount):
if mode and cls.mode == mode:
return cls
elif device and cls.device_id_string in device:
diff --git a/nova/virt/disk/mount/__init__.py b/nova/virt/disk/mount/__init__.py
new file mode 100644
index 000000000..5c18da32d
--- /dev/null
+++ b/nova/virt/disk/mount/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Support for mounting disk images on the host filesystem
+
+"""
diff --git a/nova/virt/disk/mount.py b/nova/virt/disk/mount/api.py
index e683658d2..e683658d2 100644
--- a/nova/virt/disk/mount.py
+++ b/nova/virt/disk/mount/api.py
diff --git a/nova/virt/disk/guestfs.py b/nova/virt/disk/mount/guestfs.py
index 21e33b1d7..2e29b9b43 100644
--- a/nova/virt/disk/guestfs.py
+++ b/nova/virt/disk/mount/guestfs.py
@@ -19,10 +19,10 @@ import os
from nova import exception
from nova import utils
-from nova.virt.disk import mount
+from nova.virt.disk.mount import api
-class Mount(mount.Mount):
+class GuestFSMount(api.Mount):
"""libguestfs support for arbitrary images."""
mode = 'guestfs'
device_id_string = 'guest'
diff --git a/nova/virt/disk/loop.py b/nova/virt/disk/mount/loop.py
index 3dfdc32d3..9b87b6df5 100644
--- a/nova/virt/disk/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -16,10 +16,10 @@
"""Support for mounting images with the loop device"""
from nova import utils
-from nova.virt.disk import mount
+from nova.virt.disk.mount import api
-class Mount(mount.Mount):
+class LoopMount(api.Mount):
"""loop back support for raw images."""
mode = 'loop'
device_id_string = mode
diff --git a/nova/virt/disk/nbd.py b/nova/virt/disk/mount/nbd.py
index 10895d5c9..a6eb77199 100644
--- a/nova/virt/disk/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -18,10 +18,10 @@
import os
import time
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova import utils
-from nova.virt.disk import mount
+from nova.virt.disk.mount import api
nbd_opts = [
@@ -33,11 +33,11 @@ nbd_opts = [
help='maximum number of possible nbd devices'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(nbd_opts)
+CONF = config.CONF
+CONF.register_opts(nbd_opts)
-class Mount(mount.Mount):
+class NbdMount(api.Mount):
"""qemu-nbd support disk images."""
mode = 'nbd'
device_id_string = mode
@@ -52,7 +52,7 @@ class Mount(mount.Mount):
# are no free devices. Note that patch currently hardcodes 16 devices.
# We might be able to alleviate problem 2. by scanning /proc/partitions
# like the aformentioned patch does.
- _DEVICES = ['/dev/nbd%s' % i for i in range(FLAGS.max_nbd_devices)]
+ _DEVICES = ['/dev/nbd%s' % i for i in range(CONF.max_nbd_devices)]
def _allocate_nbd(self):
if not os.path.exists("/sys/block/nbd0"):
@@ -89,7 +89,7 @@ class Mount(mount.Mount):
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
- for _i in range(FLAGS.timeout_nbd):
+ for _i in range(CONF.timeout_nbd):
if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
self.device = device
break
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index a466fa180..12f19bf3b 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -22,12 +22,10 @@ Driver base-classes:
types that support that contract
"""
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
def block_device_info_get_root(block_device_info):
@@ -349,12 +347,15 @@ class ComputeDriver(object):
"""Restore the specified instance"""
raise NotImplementedError()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
+ :param nodename:
+ node which the caller want to get resources from
+ a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
@@ -737,3 +738,16 @@ class ComputeDriver(object):
}
"""
raise NotImplementedError()
+
+ def get_available_nodes(self):
+ """Returns nodenames of all nodes managed by the compute service.
+
+ This method is for multi compute-nodes support. If a driver supports
+ multi compute-nodes, this method returns a list of nodenames managed
+ by the service. Otherwise, this method should return
+ [hypervisor_hostname].
+ """
+ stats = self.get_host_stats(refresh=True)
+ if not isinstance(stats, list):
+ stats = [stats]
+ return [s['hypervisor_hostname'] for s in stats]
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 79d98c5cf..49f7b548b 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -36,6 +36,32 @@ from nova.virt import virtapi
LOG = logging.getLogger(__name__)
+_FAKE_NODES = ['fake-mini']
+
+
+def set_nodes(nodes):
+ """Sets FakeDriver's node.list.
+
+ It has effect on the following methods:
+ get_available_nodes()
+ get_available_resource
+ get_host_stats()
+
+ To restore the change, call restore_nodes()
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = nodes
+
+
+def restore_nodes():
+ """Resets FakeDriver's node list modified by set_nodes().
+
+ Usually called from tearDown().
+ """
+ global _FAKE_NODES
+ _FAKE_NODES = ['fake-mini']
+
+
class FakeInstance(object):
def __init__(self, name, state):
@@ -56,7 +82,7 @@ class FakeDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
- self.host_status = {
+ self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': 'fake-mini',
'host_memory_total': 8000000000,
@@ -252,12 +278,14 @@ class FakeDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
+ if nodename not in _FAKE_NODES:
+ raise exception.NovaException("node %s is not found" % nodename)
dic = {'vcpus': 1,
'memory_mb': 8192,
@@ -267,7 +295,7 @@ class FakeDriver(driver.ComputeDriver):
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
- 'hypervisor_hostname': 'fake-mini',
+ 'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
@@ -319,7 +347,19 @@ class FakeDriver(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
- return self.host_status
+ stats = []
+ for nodename in _FAKE_NODES:
+ host_status = self.host_status_base.copy()
+ host_status['hypervisor_hostname'] = nodename
+ host_status['host_hostname'] = nodename
+ host_status['host_name_label'] = nodename
+ stats.append(host_status)
+ if len(stats) == 0:
+ raise exception.NovaException("FakeDriver has no node")
+ elif len(stats) == 1:
+ return stats[0]
+ else:
+ return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
@@ -341,6 +381,9 @@ class FakeDriver(driver.ComputeDriver):
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
+ def get_available_nodes(self):
+ return _FAKE_NODES
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
@@ -353,3 +396,32 @@ class FakeVirtAPI(virtapi.VirtAPI):
def instance_get_all_by_host(self, context, host):
return db.instance_get_all_by_host(context, host)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return db.aggregate_get_by_host(context, host, key)
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ return db.aggregate_metadata_get(context, aggregate_id)
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ return db.aggregate_metadata_add(context, aggregate_id, metadata,
+ set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ return db.aggregate_metadata_delete(context, aggregate_id, key)
+
+ def security_group_get_by_instance(self, context, instance_uuid):
+ return db.security_group_get_by_instance(context, instance_uuid)
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group_id):
+ return db.security_group_rule_get_by_security_group(context,
+ security_group_id)
+
+ def provider_fw_rule_get_all(self, context):
+ return db.provider_fw_rule_get_all(context)
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ return db.agent_build_get_by_triple(context,
+ hypervisor, os, architecture)
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index d066a9c21..7c6e95beb 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -17,9 +17,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import context
-from nova import db
-from nova import flags
from nova import network
from nova.network import linux_net
from nova.openstack.common import cfg
@@ -41,12 +40,12 @@ firewall_opts = [
help='Whether to allow network traffic from same network'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(firewall_opts)
+CONF = config.CONF
+CONF.register_opts(firewall_opts)
def load_driver(default, *args, **kwargs):
- fw_class = importutils.import_class(FLAGS.firewall_driver or default)
+ fw_class = importutils.import_class(CONF.firewall_driver or default)
return fw_class(*args, **kwargs)
@@ -56,6 +55,9 @@ class FirewallDriver(object):
Defines methods that any driver providing security groups
and provider fireall functionality should implement.
"""
+ def __init__(self, virtapi):
+ self._virtapi = virtapi
+
def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
At this point, the instance isn't running yet."""
@@ -139,7 +141,8 @@ class FirewallDriver(object):
class IptablesFirewallDriver(FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
- def __init__(self, **kwargs):
+ def __init__(self, virtapi, **kwargs):
+ super(IptablesFirewallDriver, self).__init__(virtapi)
self.iptables = linux_net.iptables_manager
self.instances = {}
self.network_infos = {}
@@ -204,7 +207,7 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules = self._create_filter(ips_v4, chain_name)
ipv6_rules = []
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
ips_v6 = [ip['ip'] for (_n, mapping) in network_info
for ip in mapping['ip6s']]
ipv6_rules = self._create_filter(ips_v6, chain_name)
@@ -215,7 +218,7 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
@@ -223,7 +226,7 @@ class IptablesFirewallDriver(FirewallDriver):
inst_ipv6_rules):
network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
self.iptables.ipv4['filter'].add_chain(chain_name)
ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
@@ -235,7 +238,7 @@ class IptablesFirewallDriver(FirewallDriver):
chain_name = self._instance_chain_name(instance)
self.iptables.ipv4['filter'].remove_chain(chain_name)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].remove_chain(chain_name)
@staticmethod
@@ -276,7 +279,7 @@ class IptablesFirewallDriver(FirewallDriver):
cidrs = [network['cidr'] for (network, _i) in network_info]
for cidr in cidrs:
ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
cidrv6s = [network['cidr_v6'] for (network, _i) in
network_info]
@@ -335,23 +338,23 @@ class IptablesFirewallDriver(FirewallDriver):
self._do_dhcp_rules(ipv4_rules, network_info)
#Allow project network traffic
- if FLAGS.allow_same_net_traffic:
+ if CONF.allow_same_net_traffic:
self._do_project_network_rules(ipv4_rules, ipv6_rules,
network_info)
- # We wrap these in FLAGS.use_ipv6 because they might cause
+ # We wrap these in CONF.use_ipv6 because they might cause
# a DB lookup. The other ones are just list operations, so
# they're not worth the clutter.
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
- security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
+ security_groups = self._virtapi.security_group_get_by_instance(
+ ctxt, instance['id'])
# then, security group chains and rules
for security_group in security_groups:
- rules = db.security_group_rule_get_by_security_group(ctxt,
- security_group['id'])
+ rules = self._virtapi.security_group_rule_get_by_security_group(
+ ctxt, security_group['id'])
for rule in rules:
LOG.debug(_('Adding security group rule: %r'), rule,
@@ -462,29 +465,28 @@ class IptablesFirewallDriver(FirewallDriver):
def _purge_provider_fw_rules(self):
"""Remove all rules from the provider chains."""
self.iptables.ipv4['filter'].empty_chain('provider')
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].empty_chain('provider')
def _build_provider_fw_rules(self):
"""Create all rules for the provider IP DROPs."""
self.iptables.ipv4['filter'].add_chain('provider')
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain('provider')
ipv4_rules, ipv6_rules = self._provider_rules()
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule('provider', rule)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule('provider', rule)
- @staticmethod
- def _provider_rules():
+ def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6."""
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
- rules = db.provider_fw_rule_get_all(ctxt)
+ rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
index 392dcfa13..29a98d345 100644
--- a/nova/virt/hyperv/constants.py
+++ b/nova/virt/hyperv/constants.py
@@ -52,3 +52,6 @@ VM_SUMMARY_NUM_PROCS = 4
VM_SUMMARY_ENABLED_STATE = 100
VM_SUMMARY_MEMORY_USAGE = 103
VM_SUMMARY_UPTIME = 105
+
+IDE_DISK = "VHD"
+IDE_DVD = "DVD"
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 6d9f66ff8..e62845b5b 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -91,8 +91,8 @@ class HyperVDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- self._vmops.spawn(context, instance, image_meta, network_info,
- block_device_info)
+ self._vmops.spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
@@ -122,7 +122,7 @@ class HyperVDriver(driver.ComputeDriver):
def poll_rescued_instances(self, timeout):
pass
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_host_stats(self, refresh=False):
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index a2f9d5904..627cf72bc 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -19,11 +19,14 @@
Management class for host operations.
"""
import multiprocessing
+import os
import platform
+from nova import config
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -55,19 +58,21 @@ class HostOps(baseops.BaseOps):
total_mb = long(total_kb) / 1024
return total_mb
- def _get_local_gb_total(self):
- """Get the total hdd size(GB) of physical computer.
+ def _get_local_hdd_info_gb(self):
+ """Get the total and used size of the volume containing
+ CONF.instances_path expressed in GB.
:returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
+ A tuple with the total and used space in GB.
"""
- #TODO(jordanrinke): This binds to C only right now,
- #need to bind to instance dir
- total_kb = self._conn_cimv2.query(
- "SELECT Size FROM win32_logicaldisk WHERE DriveType=3")[0].Size
- total_gb = long(total_kb) / (1024 ** 3)
- return total_gb
+ normalized_path = os.path.normpath(CONF.instances_path)
+ drive, path = os.path.splitdrive(normalized_path)
+ hdd_info = self._conn_cimv2.query(
+ ("SELECT FreeSpace,Size FROM win32_logicaldisk WHERE DeviceID='%s'"
+ ) % drive)[0]
+ total_gb = long(hdd_info.Size) / (1024 ** 3)
+ free_gb = long(hdd_info.FreeSpace) / (1024 ** 3)
+ used_gb = total_gb - free_gb
+ return total_gb, used_gb
def _get_vcpu_used(self):
""" Get vcpu usage number of physical computer.
@@ -88,21 +93,6 @@ class HostOps(baseops.BaseOps):
return total_mb
- def _get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
- """
- #TODO(jordanrinke): This binds to C only right now,
- #need to bind to instance dir
- total_kb = self._conn_cimv2.query(
- "SELECT FreeSpace FROM win32_logicaldisk WHERE DriveType=3")[0]\
- .FreeSpace
- total_gb = long(total_kb) / (1024 ** 3)
- return total_gb
-
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
@@ -123,13 +113,14 @@ class HostOps(baseops.BaseOps):
"""
LOG.info(_('get_available_resource called'))
+ local_gb, used_gb = self._get_local_hdd_info_gb()
+ # TODO(alexpilotti) implement cpu_info
dic = {'vcpus': self._get_vcpu_total(),
'memory_mb': self._get_memory_mb_total(),
- 'local_gb': self._get_local_gb_total(),
+ 'local_gb': local_gb,
'vcpus_used': self._get_vcpu_used(),
'memory_mb_used': self._get_memory_mb_used(),
- 'local_gb_used': self._get_local_gb_used(),
+ 'local_gb_used': used_gb,
'hypervisor_type': "hyperv",
'hypervisor_version': self._get_hypervisor_version(),
'hypervisor_hostname': platform.node(),
@@ -141,8 +132,7 @@ class HostOps(baseops.BaseOps):
LOG.debug(_("Updating host stats"))
data = {}
- data["disk_total"] = self._get_local_gb_total()
- data["disk_used"] = self._get_local_gb_used()
+ data["disk_total"], data["disk_used"] = self._get_local_hdd_info_gb()
data["disk_available"] = data["disk_total"] - data["disk_used"]
data["host_memory_total"] = self._get_memory_mb_total()
data["host_memory_overhead"] = self._get_memory_mb_used()
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 16baf4059..1fb0d14bb 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -21,8 +21,8 @@ Management class for live migration VM operations.
import os
import sys
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
@@ -34,7 +34,7 @@ if sys.platform == 'win32':
import wmi
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class LiveMigrationOps(baseops.BaseOps):
@@ -139,7 +139,7 @@ class LiveMigrationOps(baseops.BaseOps):
LOG.debug(_("pre_live_migration called"), instance=instance)
self._check_live_migration_config()
- if FLAGS.use_cow_images:
+ if CONF.use_cow_images:
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
block_device_info)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index 5e4676a4a..39ba25b30 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -22,8 +22,8 @@ import os
import shutil
import sys
+from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import log as logging
from nova.virt.hyperv import baseops
@@ -36,7 +36,7 @@ from xml.etree import ElementTree
if sys.platform == 'win32':
import wmi
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -75,7 +75,7 @@ class SnapshotOps(baseops.BaseOps):
f = None
try:
- src_vhd_path = os.path.join(FLAGS.instances_path, instance_name,
+ src_vhd_path = os.path.join(CONF.instances_path, instance_name,
instance_name + ".vhd")
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index e248fd37d..74e8ca7fd 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -21,12 +21,14 @@ Management class for basic VM operations.
import os
import uuid
-from nova import db
+from nova.api.metadata import base as instance_metadata
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt import configdrive
from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
@@ -40,12 +42,23 @@ hyperv_opts = [
'if none provided first external is used'),
cfg.BoolOpt('limit_cpu_features',
default=False,
- help='required for live migration among '
- 'hosts with different CPU features')
+ help='Required for live migration among '
+ 'hosts with different CPU features'),
+ cfg.BoolOpt('config_drive_inject_password',
+ default=False,
+ help='Sets the admin password in the config drive image'),
+ cfg.StrOpt('qemu_img_cmd',
+ default="qemu-img.exe",
+ help='qemu-img is used to convert between '
+ 'different image types'),
+ cfg.BoolOpt('config_drive_cdrom',
+ default=False,
+ help='Attaches the Config Drive image as a cdrom drive '
+ 'instead of a disk drive')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(hyperv_opts)
+CONF = config.CONF
+CONF.register_opts(hyperv_opts)
class VMOps(baseops.BaseOps):
@@ -104,8 +117,8 @@ class VMOps(baseops.BaseOps):
'num_cpu': info.NumberOfProcessors,
'cpu_time': info.UpTime}
- def spawn(self, context, instance, image_meta, network_info,
- block_device_info=None):
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info=None):
""" Create a new VM and start it."""
instance_name = instance["name"]
vm = self._vmutils.lookup(self._conn, instance_name)
@@ -128,7 +141,7 @@ class VMOps(baseops.BaseOps):
image_id=instance['image_ref'],
user=instance['user_id'],
project=instance['project_id'],
- cow=FLAGS.use_cow_images)
+ cow=CONF.use_cow_images)
except Exception as exn:
LOG.exception(_('cache image failed: %s'), exn)
self.destroy(instance)
@@ -137,7 +150,8 @@ class VMOps(baseops.BaseOps):
self._create_vm(instance)
if not ebs_root:
- self._create_disk(instance['name'], vhdfile)
+ self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
+ constants.IDE_DISK)
else:
self._volumeops.attach_boot_volume(block_device_info,
instance_name)
@@ -149,13 +163,63 @@ class VMOps(baseops.BaseOps):
mac_address = vif['address'].replace(':', '')
self._create_nic(instance['name'], mac_address)
+ if configdrive.required_by(instance):
+ self._create_config_drive(instance, injected_files,
+ admin_password)
+
LOG.debug(_('Starting VM %s '), instance_name)
self._set_vm_state(instance['name'], 'Enabled')
LOG.info(_('Started VM %s '), instance_name)
except Exception as exn:
LOG.exception(_('spawn vm failed: %s'), exn)
self.destroy(instance)
- raise
+ raise exn
+
+ def _create_config_drive(self, instance, injected_files, admin_password):
+ if CONF.config_drive_format != 'iso9660':
+ vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
+ CONF.config_drive_format)
+
+ LOG.info(_('Using config drive'), instance=instance)
+ extra_md = {}
+ if admin_password and CONF.config_drive_inject_password:
+ extra_md['admin_pass'] = admin_password
+
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=injected_files, extra_md=extra_md)
+
+ instance_path = self._vmutils.get_instance_path(
+ instance['name'])
+ configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
+ LOG.info(_('Creating config drive at %(path)s'),
+ {'path': configdrive_path_iso}, instance=instance)
+
+ cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
+ try:
+ cdb.make_drive(configdrive_path_iso)
+ finally:
+ cdb.cleanup()
+
+ if not CONF.config_drive_cdrom:
+ drive_type = constants.IDE_DISK
+ configdrive_path = os.path.join(instance_path,
+ 'configdrive.vhd')
+ utils.execute(CONF.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ configdrive_path_iso,
+ configdrive_path,
+ attempts=1)
+ os.remove(configdrive_path_iso)
+ else:
+ drive_type = constants.IDE_DVD
+ configdrive_path = configdrive_path_iso
+
+ self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
+ drive_type)
def _create_vm(self, instance):
"""Create a VM but don't start it. """
@@ -200,7 +264,7 @@ class VMOps(baseops.BaseOps):
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
- if FLAGS.limit_cpu_features:
+ if CONF.limit_cpu_features:
procsetting.LimitProcessorFeatures = True
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
@@ -229,60 +293,80 @@ class VMOps(baseops.BaseOps):
_('Failed to add scsi controller to VM %s') %
vm_name)
- def _create_disk(self, vm_name, vhdfile):
- """Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %(vm_name)s by attaching'
- ' disk file %(vhdfile)s') % locals())
+ def _get_ide_controller(self, vm, ctrller_addr):
#Find the IDE controller for the vm.
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
- and r.Address == "0"]
+ and r.Address == str(ctrller_addr)]
+ return ctrller
+
+ def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
+ drive_type=constants.IDE_DISK):
+ """Create an IDE drive and attach it to the vm"""
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(path)s') % locals())
+
+ vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
+ vm = vms[0]
+
+ ctrller = self._get_ide_controller(vm, ctrller_addr)
+
+ if drive_type == constants.IDE_DISK:
+ resSubType = 'Microsoft Synthetic Disk Drive'
+ elif drive_type == constants.IDE_DVD:
+ resSubType = 'Microsoft Synthetic DVD Drive'
+
#Find the default disk drive object for the vm and clone it.
- diskdflt = self._conn.query(
+ drivedflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
- diskdrive = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ WHERE ResourceSubType LIKE '%(resSubType)s'\
+ AND InstanceID LIKE '%%Default%%'" % locals())[0]
+ drive = self._vmutils.clone_wmi_obj(self._conn,
+ 'Msvm_ResourceAllocationSettingData', drivedflt)
#Set the IDE ctrller as parent.
- diskdrive.Parent = ctrller[0].path_()
- diskdrive.Address = 0
+ drive.Parent = ctrller[0].path_()
+ drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._vmutils.add_virt_resource(self._conn,
- diskdrive, vm)
+ drive, vm)
if new_resources is None:
raise vmutils.HyperVException(
- _('Failed to add diskdrive to VM %s') %
+ _('Failed to add drive to VM %s') %
vm_name)
- diskdrive_path = new_resources[0]
- LOG.debug(_('New disk drive path is %s'), diskdrive_path)
+ drive_path = new_resources[0]
+ LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
+ locals())
+
+ if drive_type == constants.IDE_DISK:
+ resSubType = 'Microsoft Virtual Hard Disk'
+ elif drive_type == constants.IDE_DVD:
+ resSubType = 'Microsoft Virtual CD/DVD Disk'
+
#Find the default VHD disk object.
- vhddefault = self._conn.query(
+ drivedefault = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
- InstanceID LIKE '%Default%' ")[0]
+ WHERE ResourceSubType LIKE '%(resSubType)s' AND \
+ InstanceID LIKE '%%Default%%' " % locals())[0]
#Clone the default and point it to the image file.
- vhddisk = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', vhddefault)
+ res = self._vmutils.clone_wmi_obj(self._conn,
+ 'Msvm_ResourceAllocationSettingData', drivedefault)
#Set the new drive as the parent.
- vhddisk.Parent = diskdrive_path
- vhddisk.Connection = [vhdfile]
+ res.Parent = drive_path
+ res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
- new_resources = self._vmutils.add_virt_resource(self._conn,
- vhddisk, vm)
+ new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
if new_resources is None:
raise vmutils.HyperVException(
- _('Failed to add vhd file to VM %s') %
- vm_name)
- LOG.info(_('Created disk for %s'), vm_name)
+ _('Failed to add %(drive_type)s image to VM %(vm_name)s') %
+ locals())
+ LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
+ locals())
def _create_nic(self, vm_name, mac):
"""Create a (synthetic) nic and attach it to the vm"""
@@ -337,20 +421,20 @@ class VMOps(baseops.BaseOps):
"""
#If there are no physical nics connected to networks, return.
LOG.debug(_("Attempting to bind NIC to %s ")
- % FLAGS.vswitch_name)
- if FLAGS.vswitch_name:
+ % CONF.vswitch_name)
+ if CONF.vswitch_name:
LOG.debug(_("Attempting to bind NIC to %s ")
- % FLAGS.vswitch_name)
+ % CONF.vswitch_name)
bound = self._conn.Msvm_VirtualSwitch(
- ElementName=FLAGS.vswitch_name)
+ ElementName=CONF.vswitch_name)
else:
LOG.debug(_("No vSwitch specified, attaching to default"))
self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
if len(bound) == 0:
return None
- if FLAGS.vswitch_name:
+ if CONF.vswitch_name:
return self._conn.Msvm_VirtualSwitch(
- ElementName=FLAGS.vswitch_name)[0]\
+ ElementName=CONF.vswitch_name)[0]\
.associators(wmi_result_class='Msvm_SwitchPort')[0]\
.associators(wmi_result_class='Msvm_VirtualSwitch')[0]
else:
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 2e54e6d47..15c03e613 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -25,8 +25,8 @@ import sys
import time
import uuid
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt import images
@@ -35,7 +35,7 @@ from nova.virt import images
if sys.platform == 'win32':
import wmi
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -74,21 +74,25 @@ class VMUtils(object):
% locals())
return True
+ def get_instance_path(self, instance_name):
+ instance_path = os.path.join(CONF.instances_path, instance_name)
+ if not os.path.exists(instance_path):
+ LOG.debug(_('Creating folder %s '), instance_path)
+ os.makedirs(instance_path)
+ return instance_path
+
def get_vhd_path(self, instance_name):
- base_vhd_folder = os.path.join(FLAGS.instances_path, instance_name)
- if not os.path.exists(base_vhd_folder):
- LOG.debug(_('Creating folder %s '), base_vhd_folder)
- os.makedirs(base_vhd_folder)
- return os.path.join(base_vhd_folder, instance_name + ".vhd")
+ instance_path = self.get_instance_path(instance_name)
+ return os.path.join(instance_path, instance_name + ".vhd")
def get_base_vhd_path(self, image_name):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, '_base')
if not os.path.exists(base_dir):
os.makedirs(base_dir)
return os.path.join(base_dir, image_name + ".vhd")
def make_export_path(self, instance_name):
- export_folder = os.path.join(FLAGS.instances_path, "export",
+ export_folder = os.path.join(CONF.instances_path, "export",
instance_name)
if os.path.isdir(export_folder):
LOG.debug(_('Removing existing folder %s '), export_folder)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index d15dfb68c..39bdc5497 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -21,7 +21,7 @@ Management class for Storage-related functions (attach, detach, etc).
import time
from nova import block_device
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
@@ -40,8 +40,8 @@ hyper_volumeops_opts = [
help='The seconds to wait between an volume attachment attempt'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(hyper_volumeops_opts)
+CONF = config.CONF
+CONF.register_opts(hyper_volumeops_opts)
class VolumeOps(baseops.BaseOps):
@@ -59,9 +59,9 @@ class VolumeOps(baseops.BaseOps):
self._initiator = None
self._default_root_device = 'vda'
self._attaching_volume_retry_count = \
- FLAGS.hyperv_attaching_volume_retry_count
+ CONF.hyperv_attaching_volume_retry_count
self._wait_between_attach_retry = \
- FLAGS.hyperv_wait_between_attach_retry
+ CONF.hyperv_wait_between_attach_retry
self._volutils = volumeutils.VolumeUtils()
def attach_boot_volume(self, block_device_info, vm_name):
@@ -207,7 +207,7 @@ class VolumeOps(baseops.BaseOps):
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': self._initiator,
}
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 018a4c278..714666620 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -25,7 +25,7 @@ import sys
import time
from nova import block_device
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import vmutils
@@ -35,7 +35,7 @@ if sys.platform == 'win32':
import _winreg
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
class VolumeUtils(object):
@@ -71,7 +71,7 @@ class VolumeUtils(object):
initiator_name = "iqn.1991-05.com.microsoft:" + \
hostname.lower()
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': initiator_name,
}
@@ -89,7 +89,7 @@ class VolumeUtils(object):
#Sending login
self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
#Waiting for the disk to be mounted. Research this
- time.sleep(FLAGS.hyperv_wait_between_attach_retry)
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, _conn_wmi, target_iqn):
""" Logs out storage target through its session id """
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5b631a0da..bc6b21a7b 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -24,8 +24,8 @@ Handling of VM disk images.
import os
import re
+from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -40,8 +40,8 @@ image_opts = [
help='Force backing images to raw format'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(image_opts)
+CONF = config.CONF
+CONF.register_opts(image_opts)
class QemuImgInfo(object):
@@ -218,7 +218,7 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
raise exception.ImageUnacceptable(image_id=image_href,
reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals())
- if fmt != "raw" and FLAGS.force_raw_images:
+ if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
with utils.remove_path_on_error(staged):
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 4c3483cb9..58d065d21 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -460,13 +460,15 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
if self.net_type == "ethernet":
if self.script is not None:
dev.append(etree.Element("script", path=self.script))
- dev.append(etree.Element("target", dev=self.target_dev))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode="private"))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
+ if self.target_dev is not None:
+ dev.append(etree.Element("target", dev=self.target_dev))
+
if self.vporttype is not None:
vport = etree.Element("virtualport", type=self.vporttype)
for p in self.vportparams:
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index da86fa135..88452f230 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -57,13 +57,11 @@ from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
-from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_mode
from nova import config
from nova import context as nova_context
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
@@ -185,10 +183,8 @@ libvirt_opts = [
'before uploading them to image service'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(libvirt_opts)
-
CONF = config.CONF
+CONF.register_opts(libvirt_opts)
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
@@ -270,21 +266,22 @@ class LibvirtDriver(driver.ComputeDriver):
self._wrapped_conn = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
- default=DEFAULT_FIREWALL_DRIVER,
+ DEFAULT_FIREWALL_DRIVER,
+ self.virtapi,
get_connection=self._get_connection)
- self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
+ self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
self.volume_drivers = {}
- for driver_str in FLAGS.libvirt_volume_drivers:
+ for driver_str in CONF.libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
self.volume_drivers[driver_type] = driver_class(self)
self._host_state = None
disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
- if FLAGS.libvirt_disk_prefix:
- self._disk_prefix = FLAGS.libvirt_disk_prefix
+ if CONF.libvirt_disk_prefix:
+ self._disk_prefix = CONF.libvirt_disk_prefix
else:
- self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
+ self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
self.default_root_device = self._disk_prefix + 'a'
self.default_second_device = self._disk_prefix + 'b'
self.default_third_device = self._disk_prefix + 'c'
@@ -292,7 +289,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(FLAGS.use_cow_images)
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
@property
def disk_cachemode(self):
@@ -305,7 +302,7 @@ class LibvirtDriver(driver.ComputeDriver):
# provided the filesystem is cache coherant (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
- if not self._supports_direct_io(FLAGS.instances_path):
+ if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@@ -338,7 +335,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
- if not FLAGS.libvirt_nonblocking:
+ if not CONF.libvirt_nonblocking:
self._wrapped_conn = self._connect(self.uri,
self.read_only)
else:
@@ -364,14 +361,14 @@ class LibvirtDriver(driver.ComputeDriver):
@property
def uri(self):
- if FLAGS.libvirt_type == 'uml':
- uri = FLAGS.libvirt_uri or 'uml:///system'
- elif FLAGS.libvirt_type == 'xen':
- uri = FLAGS.libvirt_uri or 'xen:///'
- elif FLAGS.libvirt_type == 'lxc':
- uri = FLAGS.libvirt_uri or 'lxc:///'
+ if CONF.libvirt_type == 'uml':
+ uri = CONF.libvirt_uri or 'uml:///system'
+ elif CONF.libvirt_type == 'xen':
+ uri = CONF.libvirt_uri or 'xen:///'
+ elif CONF.libvirt_type == 'lxc':
+ uri = CONF.libvirt_uri or 'lxc:///'
else:
- uri = FLAGS.libvirt_uri or 'qemu:///system'
+ uri = CONF.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
@@ -550,11 +547,11 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- target = os.path.join(FLAGS.instances_path, instance['name'])
+ target = os.path.join(CONF.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
- if FLAGS.libvirt_type == 'lxc':
- container_dir = os.path.join(FLAGS.instances_path,
+ if CONF.libvirt_type == 'lxc':
+ container_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
disk.destroy_container(container_dir=container_dir)
@@ -579,8 +576,8 @@ class LibvirtDriver(driver.ComputeDriver):
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object"""
- if FLAGS.libvirt_images_volume_group:
- vg = os.path.join('/dev', FLAGS.libvirt_images_volume_group)
+ if CONF.libvirt_images_volume_group:
+ vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
@@ -605,18 +602,18 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
- 'ip': FLAGS.my_ip,
+ 'ip': CONF.my_ip,
'initiator': self._initiator,
- 'host': FLAGS.host
+ 'host': CONF.host
}
def _cleanup_resize(self, instance, network_info):
- target = os.path.join(FLAGS.instances_path,
+ target = os.path.join(CONF.instances_path,
instance['name'] + "_resize")
if os.path.exists(target):
shutil.rmtree(target)
- if instance['host'] != FLAGS.host:
+ if instance['host'] != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
@@ -638,7 +635,7 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
mount_device)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
@@ -701,7 +698,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
raise exception.DiskNotFound(location=mount_device)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
self._detach_lxc_volume(xml, virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
@@ -745,6 +742,13 @@ class LibvirtDriver(driver.ComputeDriver):
if lxc_container_target:
disk.bind(lxc_host_volume, lxc_container_target, instance_name)
+ s = os.stat(lxc_host_volume)
+ cgroup_info = "b %s:%s rwm\n" % (os.major(s.st_rdev),
+ os.minor(s.st_rdev))
+ cgroups_path = ("/sys/fs/cgroup/devices/libvirt/lxc/"
+ "%s/devices.allow" % instance_name)
+ utils.execute('tee', cgroups_path,
+ process_input=cgroup_info, run_as_root=True)
@exception.wrap_exception()
def _detach_lxc_volume(self, xml, virt_dom, instance_name):
@@ -821,7 +825,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
- image_format = FLAGS.snapshot_image_format or source_format
+ image_format = CONF.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm as raw
if image_format == 'lvm':
@@ -841,7 +845,7 @@ class LibvirtDriver(driver.ComputeDriver):
state = LIBVIRT_POWER_STATE[state]
# NOTE(dkang): managedSave does not work for LXC
- if FLAGS.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc':
if state == power_state.RUNNING:
virt_dom.managedSave(0)
@@ -852,7 +856,7 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot.create()
# Export the snapshot to a raw image
- snapshot_directory = FLAGS.libvirt_snapshots_directory
+ snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
@@ -862,7 +866,7 @@ class LibvirtDriver(driver.ComputeDriver):
snapshot.delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
- if FLAGS.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc':
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
@@ -910,7 +914,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(vish): This actually could take slighty longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
- for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
+ for x in xrange(CONF.libvirt_wait_soft_reboot_seconds):
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
@@ -1017,15 +1021,15 @@ class LibvirtDriver(driver.ComputeDriver):
"""
unrescue_xml = self._get_domain_xml(instance, network_info)
- unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ unrescue_xml_path = os.path.join(CONF.instances_path,
instance['name'],
'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
- 'image_id': FLAGS.rescue_image_id or instance['image_ref'],
- 'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
- 'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
+ 'image_id': CONF.rescue_image_id or instance['image_ref'],
+ 'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
+ 'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
xml = self.to_xml(instance, network_info, image_meta,
rescue=rescue_images)
@@ -1039,7 +1043,7 @@ class LibvirtDriver(driver.ComputeDriver):
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
- unrescue_xml_path = os.path.join(FLAGS.instances_path,
+ unrescue_xml_path = os.path.join(CONF.instances_path,
instance['name'],
'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
@@ -1047,7 +1051,7 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
- rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
+ rescue_files = os.path.join(CONF.instances_path, instance['name'],
"*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
@@ -1163,7 +1167,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def get_host_ip_addr():
- return FLAGS.my_ip
+ return CONF.my_ip
@exception.wrap_exception()
def get_vnc_console(self, instance):
@@ -1178,7 +1182,7 @@ class LibvirtDriver(driver.ComputeDriver):
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
- host = FLAGS.vncserver_proxyclient_address
+ host = CONF.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@@ -1224,7 +1228,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""Create a blank image of specified size"""
if not fs_format:
- fs_format = FLAGS.default_ephemeral_format
+ fs_format = CONF.default_ephemeral_format
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
@@ -1243,7 +1247,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_console_log_path(instance_name):
- return os.path.join(FLAGS.instances_path, instance_name,
+ return os.path.join(CONF.instances_path, instance_name,
'console.log')
def _chown_console_log_for_instance(self, instance_name):
@@ -1259,11 +1263,11 @@ class LibvirtDriver(driver.ComputeDriver):
# syntactic nicety
def basepath(fname='', suffix=suffix):
- return os.path.join(FLAGS.instances_path,
+ return os.path.join(CONF.instances_path,
instance['name'],
fname + suffix)
- def image(fname, image_type=FLAGS.libvirt_images_type):
+ def image(fname, image_type=CONF.libvirt_images_type):
return self.image_backend.image(instance['name'],
fname + suffix, image_type)
@@ -1276,8 +1280,8 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.info(_('Creating image'), instance=instance)
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
- if FLAGS.libvirt_type == 'lxc':
- container_dir = os.path.join(FLAGS.instances_path,
+ if CONF.libvirt_type == 'lxc':
+ container_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
fileutils.ensure_tree(container_dir)
@@ -1313,8 +1317,7 @@ class LibvirtDriver(driver.ComputeDriver):
root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
size = instance['root_gb'] * 1024 * 1024 * 1024
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id)
+ inst_type = instance['instance_type']
if size == 0 or suffix == '.rescue':
size = None
@@ -1378,13 +1381,13 @@ class LibvirtDriver(driver.ComputeDriver):
# target partition for file injection
target_partition = None
if not instance['kernel_id']:
- target_partition = FLAGS.libvirt_inject_partition
+ target_partition = CONF.libvirt_inject_partition
if target_partition == 0:
target_partition = None
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
target_partition = None
- if FLAGS.libvirt_inject_key and instance['key_data']:
+ if CONF.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
@@ -1392,7 +1395,7 @@ class LibvirtDriver(driver.ComputeDriver):
# File injection
metadata = instance.get('metadata')
- if not FLAGS.libvirt_inject_password:
+ if not CONF.libvirt_inject_password:
admin_pass = None
net = netutils.get_injected_network_template(network_info)
@@ -1429,7 +1432,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
- use_cow=FLAGS.use_cow_images)
+ use_cow=CONF.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
@@ -1437,13 +1440,13 @@ class LibvirtDriver(driver.ComputeDriver):
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
- if FLAGS.libvirt_type == 'lxc':
- disk.setup_container(basepath('disk'),
+ if CONF.libvirt_type == 'lxc':
+ disk.setup_container(image('disk').path,
container_dir=container_dir,
- use_cow=FLAGS.use_cow_images)
+ use_cow=CONF.use_cow_images)
- if FLAGS.libvirt_type == 'uml':
- libvirt_utils.chown(basepath('disk'), 'root')
+ if CONF.libvirt_type == 'uml':
+ libvirt_utils.chown(image('disk').path, 'root')
@staticmethod
def _volume_in_mapping(mount_device, block_device_info):
@@ -1494,11 +1497,11 @@ class LibvirtDriver(driver.ComputeDriver):
return guestcpu
def get_guest_cpu_config(self):
- mode = FLAGS.libvirt_cpu_mode
- model = FLAGS.libvirt_cpu_model
+ mode = CONF.libvirt_cpu_mode
+ model = CONF.libvirt_cpu_model
if mode is None:
- if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
+ if CONF.libvirt_type == "kvm" or CONF.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
@@ -1506,10 +1509,10 @@ class LibvirtDriver(driver.ComputeDriver):
if mode == "none":
return None
- if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
+ if CONF.libvirt_type != "kvm" and CONF.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
- "support selecting CPU models") % FLAGS.libvirt_type
+ "support selecting CPU models") % CONF.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
@@ -1552,10 +1555,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
- fs.source_dir = os.path.join(FLAGS.instances_path,
+ fs.source_dir = os.path.join(CONF.instances_path,
instance['name'],
'rootfs')
devices.append(fs)
@@ -1566,9 +1569,9 @@ class LibvirtDriver(driver.ComputeDriver):
else:
root_device_type = 'disk'
- if FLAGS.libvirt_type == "uml":
+ if CONF.libvirt_type == "uml":
default_disk_bus = "uml"
- elif FLAGS.libvirt_type == "xen":
+ elif CONF.libvirt_type == "xen":
default_disk_bus = "xen"
else:
default_disk_bus = "virtio"
@@ -1663,7 +1666,7 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(FLAGS.instances_path,
+ diskconfig.source_path = os.path.join(CONF.instances_path,
instance['name'],
"disk.config")
diskconfig.target_dev = self.default_last_device
@@ -1680,13 +1683,10 @@ class LibvirtDriver(driver.ComputeDriver):
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
- # FIXME(vish): stick this in db
- inst_type_id = instance['instance_type_id']
- inst_type = instance_types.get_instance_type(inst_type_id,
- inactive=True)
+ inst_type = instance['instance_type']
guest = vconfig.LibvirtConfigGuest()
- guest.virt_type = FLAGS.libvirt_type
+ guest.virt_type = CONF.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
@@ -1708,65 +1708,65 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
- elif FLAGS.libvirt_type == "uml":
+ elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
- elif FLAGS.libvirt_type == "xen":
+ elif CONF.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
- if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
+ if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = '/usr/lib/xen/boot/hvmloader'
- if FLAGS.libvirt_type == "lxc":
+ if CONF.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = "console=ttyS0"
- elif FLAGS.libvirt_type == "uml":
+ elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name or "/dev/ubda"
else:
- if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
+ if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name or "/dev/xvda"
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
- guest.os_kernel = os.path.join(FLAGS.instances_path,
+ guest.os_kernel = os.path.join(CONF.instances_path,
instance['name'],
"kernel.rescue")
if rescue.get('ramdisk_id'):
- guest.os_initrd = os.path.join(FLAGS.instances_path,
+ guest.os_initrd = os.path.join(CONF.instances_path,
instance['name'],
"ramdisk.rescue")
elif instance['kernel_id']:
- guest.os_kernel = os.path.join(FLAGS.instances_path,
+ guest.os_kernel = os.path.join(CONF.instances_path,
instance['name'],
"kernel")
- if FLAGS.libvirt_type == "xen":
+ if CONF.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = "root=%s console=ttyS0" % (
root_device_name or "/dev/vda",)
if instance['ramdisk_id']:
- guest.os_initrd = os.path.join(FLAGS.instances_path,
+ guest.os_initrd = os.path.join(CONF.instances_path,
instance['name'],
"ramdisk")
else:
guest.os_boot_dev = "hd"
- if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
+ if CONF.libvirt_type != "lxc" and CONF.libvirt_type != "uml":
guest.acpi = True
clk = vconfig.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
- if FLAGS.libvirt_type == "kvm":
+ if CONF.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
@@ -1793,14 +1793,14 @@ class LibvirtDriver(driver.ComputeDriver):
cfg = self.vif_driver.plug(instance, (network, mapping))
guest.add_device(cfg)
- if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
+ if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
- consolelog.source_path = os.path.join(FLAGS.instances_path,
+ consolelog.source_path = os.path.join(CONF.instances_path,
instance['name'],
"console.log")
guest.add_device(consolelog)
@@ -1813,8 +1813,8 @@ class LibvirtDriver(driver.ComputeDriver):
consolepty.type = "pty"
guest.add_device(consolepty)
- if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
- if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
+ if CONF.vnc_enabled and CONF.libvirt_type not in ('lxc', 'uml'):
+ if CONF.use_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
@@ -1822,8 +1822,8 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
- graphics.keymap = FLAGS.vnc_keymap
- graphics.listen = FLAGS.vncserver_listen
+ graphics.keymap = CONF.vnc_keymap
+ graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
return guest
@@ -2010,7 +2010,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
@@ -2048,7 +2048,7 @@ class LibvirtDriver(driver.ComputeDriver):
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
- if FLAGS.libvirt_type == 'xen':
+ if CONF.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
# skip dom0
@@ -2079,7 +2079,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
@@ -2206,12 +2206,13 @@ class LibvirtDriver(driver.ComputeDriver):
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
+ :param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
dic = {'vcpus': self.get_vcpu_total(),
@@ -2245,7 +2246,7 @@ class LibvirtDriver(driver.ComputeDriver):
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
- (disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
+ (disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
src = instance_ref['host']
@@ -2282,7 +2283,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
- source = FLAGS.host
+ source = CONF.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
@@ -2352,6 +2353,11 @@ class LibvirtDriver(driver.ComputeDriver):
None. if given cpu info is not compatible to this server,
raise exception.
"""
+
+ # NOTE(berendt): virConnectCompareCPU not working for Xen
+ if CONF.libvirt_type == 'xen':
+ return 1
+
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = vconfig.LibvirtConfigCPU()
@@ -2362,7 +2368,7 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
- cpu.add_feature(config.LibvirtConfigCPUFeature(f))
+ cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
@@ -2375,12 +2381,12 @@ class LibvirtDriver(driver.ComputeDriver):
raise
if ret <= 0:
- LOG.error(reason=m % locals())
+ LOG.error(m % locals())
raise exception.InvalidCPUInfo(reason=m % locals())
def _create_shared_storage_test_file(self):
- """Makes tmpfile under FLAGS.instance_path."""
- dirpath = FLAGS.instances_path
+ """Makes tmpfile under CONF.instance_path."""
+ dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
@@ -2389,17 +2395,17 @@ class LibvirtDriver(driver.ComputeDriver):
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
- """Confirms existence of the tmpfile under FLAGS.instances_path.
+ """Confirms existence of the tmpfile under CONF.instances_path.
Cannot confirm tmpfile return False."""
- tmp_file = os.path.join(FLAGS.instances_path, filename)
+ tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
- """Removes existence of the tmpfile under FLAGS.instances_path."""
- tmp_file = os.path.join(FLAGS.instances_path, filename)
+ """Removes existence of the tmpfile under CONF.instances_path."""
+ tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
@@ -2423,7 +2429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
- timeout_count = range(FLAGS.live_migration_retry_count)
+ timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance_ref,
network_info):
@@ -2486,17 +2492,17 @@ class LibvirtDriver(driver.ComputeDriver):
# Do live migration.
try:
if block_migration:
- flaglist = FLAGS.block_migration_flag.split(',')
+ flaglist = CONF.block_migration_flag.split(',')
else:
- flaglist = FLAGS.live_migration_flag.split(',')
+ flaglist = CONF.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
dom = self._lookup_by_name(instance_ref["name"])
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
- FLAGS.live_migration_bandwidth)
+ CONF.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
@@ -2535,7 +2541,7 @@ class LibvirtDriver(driver.ComputeDriver):
# ensure_filtering_rules_for_instance, to ensure bridge is set up
# Retry operation is necessary because continuously request comes,
# concorrent request occurs to iptables, then it complains.
- max_retry = FLAGS.live_migration_retry_count
+ max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance_ref, network_info)
@@ -2563,7 +2569,7 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
- instance_dir = os.path.join(FLAGS.instances_path, instance['name'])
+ instance_dir = os.path.join(CONF.instances_path, instance['name'])
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
@@ -2584,7 +2590,7 @@ class LibvirtDriver(driver.ComputeDriver):
image = self.image_backend.image(instance['name'],
instance_disk,
- FLAGS.libvirt_images_type)
+ CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=ctxt,
filename=cache_name,
@@ -2625,7 +2631,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = os.path.join(FLAGS.instances_path,
+ instance_dir = os.path.join(CONF.instances_path,
instance_ref["name"])
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
@@ -2802,7 +2808,7 @@ class LibvirtDriver(driver.ComputeDriver):
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
same_host = (dest == self.get_host_ip_addr())
- inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
+ inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
@@ -2876,7 +2882,7 @@ class LibvirtDriver(driver.ComputeDriver):
if size:
disk.extend(info['path'], size)
- if fmt == 'raw' and FLAGS.use_cow_images:
+ if fmt == 'raw' and CONF.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
@@ -2902,7 +2908,7 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
- inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
+ inst_base = "%s/%s" % (CONF.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
@@ -2988,7 +2994,7 @@ class LibvirtDriver(driver.ComputeDriver):
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
- except libvirt.libvirtError:
+ except (libvirt.libvirtError, AttributeError):
pass
return output
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 819a8ec0c..2e73661f6 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -20,13 +20,12 @@
from eventlet import tpool
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
try:
import libvirt
@@ -44,7 +43,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
spoofing, IP spoofing, and ARP spoofing.
"""
- def __init__(self, get_connection, **kwargs):
+ def __init__(self, virtapi, get_connection, **kwargs):
+ super(NWFilterFirewall, self).__init__(virtapi)
self._libvirt_get_connection = get_connection
self.static_filters_configured = False
self.handle_security_groups = False
@@ -115,7 +115,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
- if instance['image_ref'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(CONF.vpn_image_id):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@@ -142,7 +142,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
self._define_filter(self.nova_no_nd_reflection_filter)
filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
@@ -164,7 +164,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if callable(xml):
xml = xml()
# execute in a native thread and block current greenthread until done
- if not FLAGS.libvirt_nonblocking:
+ if not CONF.libvirt_nonblocking:
# NOTE(maoy): the original implementation is to have the API called
# in the thread pool no matter what.
tpool.execute(self._conn.nwfilterDefineXML, xml)
@@ -219,9 +219,9 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
- def __init__(self, execute=None, **kwargs):
- super(IptablesFirewallDriver, self).__init__(**kwargs)
- self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ def __init__(self, virtapi, execute=None, **kwargs):
+ super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
+ self.nwfilter = NWFilterFirewall(virtapi, kwargs['get_connection'])
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 3dc8e2037..18a8aa910 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -19,14 +19,14 @@ import abc
import contextlib
import os
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import lockutils
from nova import utils
from nova.virt.disk import api as disk
-from nova.virt.libvirt import config
+from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import snapshots
from nova.virt.libvirt import utils as libvirt_utils
@@ -46,8 +46,8 @@ __imagebackend_opts = [
' if this flag is set to True.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(__imagebackend_opts)
+CONF = config.CONF
+CONF.register_opts(__imagebackend_opts)
class Image(object):
@@ -67,7 +67,7 @@ class Image(object):
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
- self.lock_path = os.path.join(FLAGS.instances_path, 'locks')
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
@abc.abstractmethod
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -90,7 +90,7 @@ class Image(object):
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
"""
- info = config.LibvirtConfigGuestDisk()
+ info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
info.source_device = device_type
info.target_bus = disk_bus
@@ -121,7 +121,7 @@ class Image(object):
fetch_func(target=target, *args, **kwargs)
if not os.path.exists(self.path):
- base_dir = os.path.join(FLAGS.instances_path, '_base')
+ base_dir = os.path.join(CONF.instances_path, '_base')
if not os.path.exists(base_dir):
fileutils.ensure_tree(base_dir)
base = os.path.join(base_dir, filename)
@@ -142,7 +142,7 @@ class Raw(Image):
def __init__(self, instance=None, name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
- self.path = path or os.path.join(FLAGS.instances_path,
+ self.path = path or os.path.join(CONF.instances_path,
instance, name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -170,7 +170,7 @@ class Qcow2(Image):
def __init__(self, instance=None, name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
- self.path = path or os.path.join(FLAGS.instances_path,
+ self.path = path or os.path.join(CONF.instances_path,
instance, name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
@@ -209,16 +209,16 @@ class Lvm(Image):
self.lv = info['LV']
self.path = path
else:
- if not FLAGS.libvirt_images_volume_group:
+ if not CONF.libvirt_images_volume_group:
raise RuntimeError(_('You should specify'
' libvirt_images_volume_group'
' flag to use LVM images.'))
- self.vg = FLAGS.libvirt_images_volume_group
+ self.vg = CONF.libvirt_images_volume_group
self.lv = '%s_%s' % (self.escape(instance),
self.escape(name))
self.path = os.path.join('/dev', self.vg, self.lv)
- self.sparse = FLAGS.libvirt_sparse_logical_volumes
+ self.sparse = CONF.libvirt_sparse_logical_volumes
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -270,7 +270,7 @@ class Backend(object):
def backend(self, image_type=None):
if not image_type:
- image_type = FLAGS.libvirt_images_type
+ image_type = CONF.libvirt_images_type
image = self.BACKEND.get(image_type)
if not image:
raise RuntimeError(_('Unknown image_type=%s') % image_type)
@@ -282,7 +282,7 @@ class Backend(object):
:instance: Instance name.
:name: Image name.
:image_type: Image type.
- Optional, is FLAGS.libvirt_images_type by default.
+ Optional, is CONF.libvirt_images_type by default.
"""
backend = self.backend(image_type)
return backend(instance=instance, name=name)
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index efc57eea5..7256dcdb0 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -30,7 +30,6 @@ import time
from nova.compute import task_states
from nova.compute import vm_states
from nova import config
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -57,10 +56,9 @@ imagecache_opts = [
]
CONF = config.CONF
+CONF.register_opts(imagecache_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('base_dir_name', 'nova.compute.manager')
-FLAGS = flags.FLAGS
-FLAGS.register_opts(imagecache_opts)
def read_stored_checksum(target):
@@ -148,7 +146,7 @@ class ImageCacheManager(object):
image_ref_str = str(instance['image_ref'])
local, remote, insts = self.used_images.get(image_ref_str,
(0, 0, []))
- if instance['host'] == FLAGS.host:
+ if instance['host'] == CONF.host:
local += 1
else:
remote += 1
@@ -161,10 +159,10 @@ class ImageCacheManager(object):
def _list_backing_images(self):
"""List the backing images currently in use."""
inuse_images = []
- for ent in os.listdir(FLAGS.instances_path):
+ for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug(_('%s is a valid instance name'), ent)
- disk_path = os.path.join(FLAGS.instances_path, ent, 'disk')
+ disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug(_('%s has a disk file'), ent)
backing_file = virtutils.get_disk_backing_file(disk_path)
@@ -174,8 +172,8 @@ class ImageCacheManager(object):
'backing': backing_file})
if backing_file:
- backing_path = os.path.join(FLAGS.instances_path,
- FLAGS.base_dir_name,
+ backing_path = os.path.join(CONF.instances_path,
+ CONF.base_dir_name,
backing_file)
if not backing_path in inuse_images:
inuse_images.append(backing_path)
@@ -226,7 +224,7 @@ class ImageCacheManager(object):
handle manually when it occurs.
"""
- if not FLAGS.checksum_base_images:
+ if not CONF.checksum_base_images:
return None
stored_checksum = read_stored_checksum(base_file)
@@ -272,9 +270,9 @@ class ImageCacheManager(object):
mtime = os.path.getmtime(base_file)
age = time.time() - mtime
- maxage = FLAGS.remove_unused_resized_minimum_age_seconds
+ maxage = CONF.remove_unused_resized_minimum_age_seconds
if base_file in self.originals:
- maxage = FLAGS.remove_unused_original_minimum_age_seconds
+ maxage = CONF.remove_unused_original_minimum_age_seconds
if age < maxage:
LOG.info(_('Base file too young to remove: %s'),
@@ -377,7 +375,7 @@ class ImageCacheManager(object):
# created, but may remain from previous versions.
self._reset_state()
- base_dir = os.path.join(FLAGS.instances_path, FLAGS.base_dir_name)
+ base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
LOG.debug(_('Skipping verification, no base directory at %s'),
base_dir)
@@ -423,7 +421,7 @@ class ImageCacheManager(object):
LOG.info(_('Removable base files: %s'),
' '.join(self.removable_base_files))
- if FLAGS.remove_unused_base_images:
+ if CONF.remove_unused_base_images:
for base_file in self.removable_base_files:
self._remove_base_file(base_file)
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 856efec4f..9bda95b44 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -28,7 +28,6 @@ from lxml import etree
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
@@ -48,10 +47,9 @@ util_opts = [
]
CONF = config.CONF
+CONF.register_opts(util_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('base_dir_name', 'nova.compute.manager')
-FLAGS = flags.FLAGS
-FLAGS.register_opts(util_opts)
def execute(*args, **kwargs):
@@ -170,7 +168,7 @@ def list_logical_volumes(vg):
:param vg: volume group name
"""
- out, err = execute('lvs', '--noheadings', '-o', 'lv_path', vg,
+ out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
@@ -212,12 +210,12 @@ def pick_disk_driver_name(is_block_dev=False):
:param is_block_dev:
:returns: driver_name or None
"""
- if FLAGS.libvirt_type == "xen":
+ if CONF.libvirt_type == "xen":
if is_block_dev:
return "phy"
else:
return "tap"
- elif FLAGS.libvirt_type in ('kvm', 'qemu'):
+ elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
# UML doesn't want a driver_name set
@@ -396,7 +394,7 @@ def find_disk(virt_dom):
May be file or device"""
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
- if FLAGS.libvirt_type == 'lxc':
+ if CONF.libvirt_type == 'lxc':
source = domain.find('devices/filesystem/source')
disk_path = source.get('dir')
disk_path = disk_path[0:disk_path.rfind('rootfs')]
@@ -417,7 +415,7 @@ def get_disk_type(path):
if path.startswith('/dev'):
return 'lvm'
- return images.qemu_img_info(path)['file format']
+ return images.qemu_img_info(path).file_format
def get_fs_info(path):
@@ -452,7 +450,7 @@ def get_info_filename(base_path):
"""
base_file = os.path.basename(base_path)
- return (FLAGS.image_info_filename_pattern
+ return (CONF.image_info_filename_pattern
% {'image': base_file})
@@ -460,7 +458,7 @@ def is_valid_info_file(path):
"""Test if a given path matches the pattern for info files."""
digest_size = hashlib.sha1().digestsize * 2
- regexp = (FLAGS.image_info_filename_pattern
+ regexp = (CONF.image_info_filename_pattern
% {'image': ('([0-9a-f]{%(digest_size)d}|'
'[0-9a-f]{%(digest_size)d}_sm|'
'[0-9a-f]{%(digest_size)d}_[0-9]+)'
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d6edd1ad8..feb6e456a 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -21,7 +21,6 @@
from nova import config
from nova import exception
-from nova import flags
from nova.network import linux_net
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -42,9 +41,8 @@ libvirt_vif_opts = [
help='Use virtio for bridge interfaces'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(libvirt_vif_opts)
CONF = config.CONF
+CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
LINUX_DEV_LEN = 14
@@ -63,7 +61,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
conf.mac_addr = mapping['mac']
conf.source_dev = network['bridge']
conf.script = ""
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
@@ -71,15 +69,15 @@ class LibvirtBridgeDriver(vif.VIFDriver):
if mapping['dhcp_server']:
conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
conf.add_filter_param("RASERVER",
mapping.get('gateway_v6') + "/128")
- if FLAGS.allow_same_net_traffic:
+ if CONF.allow_same_net_traffic:
net, mask = netutils.get_net_and_mask(network['cidr'])
conf.add_filter_param("PROJNET", net)
conf.add_filter_param("PROJMASK", mask)
- if FLAGS.use_ipv6:
+ if CONF.use_ipv6:
net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
conf.add_filter_param("PROJNET6", net_v6)
@@ -93,7 +91,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
- iface = FLAGS.vlan_interface or network['bridge_interface']
+ iface = CONF.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']},
@@ -103,7 +101,7 @@ class LibvirtBridgeDriver(vif.VIFDriver):
network['bridge'],
iface)
else:
- iface = FLAGS.flat_interface or network['bridge_interface']
+ iface = CONF.flat_interface or network['bridge_interface']
LOG.debug(_("Ensuring bridge %s"), network['bridge'],
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
@@ -129,7 +127,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
- FLAGS.libvirt_ovs_bridge, dev,
+ CONF.libvirt_ovs_bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
@@ -138,7 +136,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
run_as_root=True)
def delete_ovs_vif_port(self, dev):
- utils.execute('ovs-vsctl', 'del-port', FLAGS.libvirt_ovs_bridge,
+ utils.execute('ovs-vsctl', 'del-port', CONF.libvirt_ovs_bridge,
dev, run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
@@ -165,7 +163,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
conf = vconfig.LibvirtConfigGuestInterface()
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.net_type = "ethernet"
conf.target_dev = dev
@@ -260,9 +258,9 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
conf = vconfig.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
- conf.source_dev = FLAGS.libvirt_ovs_bridge
+ conf.source_dev = CONF.libvirt_ovs_bridge
conf.mac_addr = mapping['mac']
- if FLAGS.libvirt_use_virtio_for_bridges:
+ if CONF.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", mapping['vif_uuid'])
@@ -277,6 +275,9 @@ class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
+ def get_bridge_name(self, network_id):
+ return ("brq" + network_id)[:LINUX_DEV_LEN]
+
def get_dev_name(self, iface_id):
return ("tap" + iface_id)[:LINUX_DEV_LEN]
@@ -285,26 +286,20 @@ class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
- if FLAGS.libvirt_type != 'xen':
- linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev)
+ bridge = self.get_bridge_name(network['id'])
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None,
+ filtering=False)
conf = vconfig.LibvirtConfigGuestInterface()
-
- if FLAGS.libvirt_use_virtio_for_bridges:
- conf.model = 'virtio'
- conf.net_type = "ethernet"
conf.target_dev = dev
- conf.script = ""
+ conf.net_type = "bridge"
conf.mac_addr = mapping['mac']
+ conf.source_dev = bridge
+ if CONF.libvirt_use_virtio_for_bridges:
+ conf.model = "virtio"
return conf
def unplug(self, instance, vif):
- """Unplug the VIF by deleting the port from the bridge."""
- network, mapping = vif
- dev = self.get_dev_name(mapping['vif_uuid'])
- try:
- utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
- except exception.ProcessExecutionError:
- LOG.warning(_("Failed while unplugging vif"), instance=instance)
- raise
+ """No action needed. Libvirt takes care of cleanup"""
+ pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 7020c2518..b9b88ce21 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -22,7 +22,7 @@ import time
from nova import config
from nova import exception
-from nova import flags
+from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
@@ -30,9 +30,22 @@ from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+
+volume_opts = [
+ cfg.IntOpt('num_iscsi_scan_tries',
+ default=3,
+ help='number of times to rescan iSCSI target to find volume'),
+ cfg.StrOpt('rbd_user',
+ default=None,
+ help='the RADOS client name for accessing rbd volumes'),
+ cfg.StrOpt('rbd_secret_uuid',
+ default=None,
+ help='the libvirt uuid of the secret for the rbd_user'
+ 'volumes')
+ ]
+
CONF = config.CONF
-CONF.import_opt('num_iscsi_scan_tries', 'nova.volume.driver')
+CONF.register_opts(volume_opts)
class LibvirtVolumeDriver(object):
@@ -92,11 +105,11 @@ class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
netdisk_properties = connection_info['data']
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
- FLAGS.rbd_secret_uuid):
- conf.auth_secret_uuid = FLAGS.rbd_secret_uuid
+ CONF.rbd_secret_uuid):
+ conf.auth_secret_uuid = CONF.rbd_secret_uuid
auth_enabled = True # Force authentication locally
- if FLAGS.rbd_user:
- conf.auth_username = FLAGS.rbd_user
+ if CONF.rbd_user:
+ conf.auth_username = CONF.rbd_user
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
@@ -172,7 +185,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
while not os.path.exists(host_device):
- if tries >= FLAGS.num_iscsi_scan_tries:
+ if tries >= CONF.num_iscsi_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index cf901b23a..b952baffa 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -20,8 +20,8 @@
import ctypes
import os
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -34,8 +34,8 @@ volume_opts = [
default='$state_path/mnt',
help='Base dir where nfs expected to be mounted on compute'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(volume_opts)
+CONF = config.CONF
+CONF.register_opts(volume_opts)
class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@@ -63,7 +63,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
"""
@type nfs_export: string
"""
- mount_path = os.path.join(FLAGS.nfs_mount_point_base,
+ mount_path = os.path.join(CONF.nfs_mount_point_base,
self.get_hash_str(nfs_export))
self._mount_nfs(mount_path, nfs_export, ensure=True)
return mount_path
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 05b642b8f..8caacbc7d 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -24,10 +24,7 @@
import netaddr
from nova import config
-from nova import flags
-
-FLAGS = flags.FLAGS
CONF = config.CONF
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
@@ -57,8 +54,8 @@ def get_ip_version(cidr):
return int(net.version)
-def get_injected_network_template(network_info, use_ipv6=FLAGS.use_ipv6,
- template=FLAGS.injected_network_template):
+def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
+ template=CONF.injected_network_template):
"""
return a rendered network template for the given network_info
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index f4f26045e..8bed966d6 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -17,8 +17,8 @@
from nova.compute import task_states
from nova.compute import vm_states
+from nova import config
from nova import context as nova_context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -50,8 +50,8 @@ powervm_opts = [
help='Local directory to download glance images to'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(powervm_opts)
+CONF = config.CONF
+CONF.register_opts(powervm_opts)
class PowerVMDriver(driver.ComputeDriver):
@@ -141,7 +141,7 @@ class PowerVMDriver(driver.ComputeDriver):
"""Power on the specified instance"""
self._powervm.power_on(instance['name'])
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info."""
return self._powervm.get_available_resource()
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 2601f0537..59b83af04 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -20,12 +20,14 @@ import os
import re
import time
+from nova import config
from nova import exception as nova_exception
-from nova import flags
from nova import utils
from nova.compute import power_state
+from nova.openstack.common import excutils
from nova.openstack.common import log as logging
+
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
@@ -35,14 +37,14 @@ from nova.virt.powervm import lpar as LPAR
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
+CONF = config.CONF
def get_powervm_operator():
- if FLAGS.powervm_mgr_type == 'ivm':
- return IVMOperator(common.Connection(FLAGS.powervm_mgr,
- FLAGS.powervm_mgr_user,
- FLAGS.powervm_mgr_passwd))
+ if CONF.powervm_mgr_type == 'ivm':
+ return IVMOperator(common.Connection(CONF.powervm_mgr,
+ CONF.powervm_mgr_user,
+ CONF.powervm_mgr_passwd))
class PowerVMOperator(object):
@@ -217,14 +219,14 @@ class PowerVMOperator(object):
"""Fetch image from glance and copy it to the remote system."""
try:
file_name = '.'.join([image_id, 'gz'])
- file_path = os.path.join(FLAGS.powervm_img_local_path,
+ file_path = os.path.join(CONF.powervm_img_local_path,
file_name)
LOG.debug(_("Fetching image '%s' from glance") % image_id)
images.fetch_to_raw(context, image_id, file_path,
instance['user_id'],
project_id=instance['project_id'])
LOG.debug(_("Copying image '%s' to IVM") % file_path)
- remote_path = FLAGS.powervm_img_remote_path
+ remote_path = CONF.powervm_img_remote_path
remote_file_name, size = self._operator.copy_image_file(
file_path, remote_path)
# Logical volume
@@ -263,7 +265,13 @@ class PowerVMOperator(object):
time.sleep(1)
except exception.PowerVMImageCreationFailed:
- self._cleanup(instance['name'])
+ with excutils.save_and_reraise_exception():
+ # log errors in cleanup
+ try:
+ self._cleanup(instance['name'])
+ except Exception:
+ LOG.exception(_('Error while attempting to '
+ 'clean up failed instance launch.'))
def destroy(self, instance_name):
"""Destroy (shutdown and delete) the specified instance.
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 13aaa7e4d..85e638add 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -15,9 +15,6 @@
# under the License.
-from nova import db
-
-
class VirtAPI(object):
def instance_update(self, context, instance_uuid, updates):
"""Perform an instance update operation on behalf of a virt driver
@@ -42,3 +39,69 @@ class VirtAPI(object):
:param host: host running instances to be returned
"""
raise NotImplementedError()
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ """Get a list of aggregates to which the specified host belongs
+ :param context: security context
+ :param host: the host for which aggregates should be returned
+ :param key: optionally filter by hosts with the given metadata key
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ """Get metadata for the specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate for which metadata is to
+ be returned
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ """Add/update metadata for specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate on which to update metadata
+ :param metadata: dict of metadata to add/update
+ :param set_delete: if True, only add
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ """Delete the given metadata key from specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate from which to delete metadata
+ :param key: metadata key to delete
+ """
+ raise NotImplementedError()
+
+ def security_group_get_by_instance(self, context, instance_uuid):
+ """Get the security group for a specified instance
+ :param context: security context
+ :param instance_uuid: instance defining the security group we want
+ """
+ raise NotImplementedError()
+
+ def security_group_rule_get_by_security_group(self, context,
+ security_group_id):
+ """Get the rules associated with a specified security group
+ :param context: security context
+ :param security_group_id: the security group for which the rules
+ should be returned
+ """
+ raise NotImplementedError()
+
+ def provider_fw_rule_get_all(self, context):
+ """Get the provider firewall rules
+ :param context: security context
+ """
+ raise NotImplementedError()
+
+ def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
+ """Get information about the available agent builds for a given
+ hypervisor, os, and architecture
+ :param context: security context
+ :param hypervisor: agent hypervisor type
+ :param os: agent operating system type
+ :param architecture: agent architecture
+ """
+ raise NotImplementedError()
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index e56f81213..f0673cba2 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -36,8 +36,8 @@ import time
from eventlet import event
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -81,8 +81,8 @@ vmwareapi_opts = [
help='Physical ethernet adapter name for vlan networking'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(vmwareapi_opts)
+CONF = config.CONF
+CONF.register_opts(vmwareapi_opts)
TIME_BETWEEN_API_CALL_RETRIES = 2.0
@@ -103,10 +103,10 @@ class VMWareESXDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMWareESXDriver, self).__init__(virtapi)
- host_ip = FLAGS.vmwareapi_host_ip
- host_username = FLAGS.vmwareapi_host_username
- host_password = FLAGS.vmwareapi_host_password
- api_retry_count = FLAGS.vmwareapi_api_retry_count
+ host_ip = CONF.vmwareapi_host_ip
+ host_username = CONF.vmwareapi_host_username
+ host_password = CONF.vmwareapi_host_password
+ api_retry_count = CONF.vmwareapi_api_retry_count
if not host_ip or host_username is None or host_password is None:
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
@@ -177,7 +177,7 @@ class VMWareESXDriver(driver.ComputeDriver):
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
- 'ip': FLAGS.vmwareapi_host_ip,
+ 'ip': CONF.vmwareapi_host_ip,
'initiator': None,
'host': None
}
@@ -192,11 +192,11 @@ class VMWareESXDriver(driver.ComputeDriver):
def get_console_pool_info(self, console_type):
"""Get info about the host on which the VM resides."""
- return {'address': FLAGS.vmwareapi_host_ip,
- 'username': FLAGS.vmwareapi_host_username,
- 'password': FLAGS.vmwareapi_host_password}
+ return {'address': CONF.vmwareapi_host_ip,
+ 'username': CONF.vmwareapi_host_username,
+ 'password': CONF.vmwareapi_host_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""This method is supported only by libvirt."""
return
@@ -373,7 +373,7 @@ class VMWareAPISession(object):
done = event.Event()
loop = utils.LoopingCall(self._poll_task, instance_uuid, task_ref,
done)
- loop.start(FLAGS.vmwareapi_task_poll_interval)
+ loop.start(CONF.vmwareapi_task_poll_interval)
ret_val = done.wait()
loop.stop()
return ret_val
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index b0d7cef0b..a20863d8c 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -27,13 +27,11 @@ import urllib
import urllib2
import urlparse
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-
USER_AGENT = "OpenStack-ESX-Adapter"
READ_CHUNKSIZE = 65536
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index a00dd5c36..ad7c5d536 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -17,8 +17,8 @@
"""VIF drivers for VMWare."""
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import log as logging
from nova.virt import vif
from nova.virt.vmwareapi import network_utils
@@ -26,8 +26,8 @@ from nova.virt.vmwareapi import network_utils
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-FLAGS.set_default('vmwareapi_vlan_interface', 'vmnic0')
+CONF = config.CONF
+CONF.set_default('vmwareapi_vlan_interface', 'vmnic0')
class VMWareVlanBridgeDriver(vif.VIFDriver):
@@ -44,7 +44,7 @@ class VMWareVlanBridgeDriver(vif.VIFDriver):
"""Create a vlan and bridge unless they already exist."""
vlan_num = network['vlan']
bridge = network['bridge']
- vlan_interface = FLAGS.vmwareapi_vlan_interface
+ vlan_interface = CONF.vmwareapi_vlan_interface
# Check if the vlan_interface physical network adapter exists on the
# host.
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index d8684ce7c..2894074d6 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -26,7 +26,7 @@ try:
except ImportError:
suds = None
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.virt.vmwareapi import error_util
@@ -41,8 +41,8 @@ vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
'Due to a bug in vSphere ESX 4.1 default wsdl. '
'Refer readme-vmware to setup')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(vmwareapi_wsdl_loc_opt)
+CONF = config.CONF
+CONF.register_opt(vmwareapi_wsdl_loc_opt)
if suds:
@@ -85,7 +85,7 @@ class Vim:
self._protocol = protocol
self._host_name = host
- wsdl_url = FLAGS.vmwareapi_wsdl_loc
+ wsdl_url = CONF.vmwareapi_wsdl_loc
if wsdl_url is None:
raise Exception(_("Must specify vmwareapi_wsdl_loc"))
# TODO(sateesh): Use this when VMware fixes their faulty wsdl
@@ -103,7 +103,7 @@ class Vim:
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
try:
- return object.__getattr__(self, attr_name)
+ return getattr(self, attr_name)
except AttributeError:
def vim_request_handler(managed_object, **kwargs):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 541e16e44..4cb5b9af6 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,8 +27,8 @@ import urllib2
import uuid
from nova.compute import power_state
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -42,8 +42,8 @@ vmware_vif_driver_opt = cfg.StrOpt('vmware_vif_driver',
default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
help='The VMWare VIF driver to configure the VIFs.')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(vmware_vif_driver_opt)
+CONF = config.CONF
+CONF.register_opt(vmware_vif_driver_opt)
LOG = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ class VMWareVMOps(object):
def __init__(self, session):
"""Initializer."""
self._session = session
- self._vif_driver = importutils.import_object(FLAGS.vmware_vif_driver)
+ self._vif_driver = importutils.import_object(CONF.vmware_vif_driver)
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@@ -207,7 +207,7 @@ class VMWareVMOps(object):
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Naming the VM files in correspondence with the VM instance name
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 605c95cfd..bf1ea958d 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,7 +21,7 @@ import os
import time
import uuid
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -49,15 +49,15 @@ xenapi_agent_opts = [
'configuration is not injected into the image. '
'Used if compute_driver=xenapi.XenAPIDriver and '
' flat_injected=True'),
- cfg.StrOpt('xenapi_disable_agent',
+ cfg.BoolOpt('xenapi_disable_agent',
default=False,
help='Disable XenAPI agent. Reduces the amount of time '
'it takes nova to detect that a VM has started, when '
'that VM does not have the agent installed'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_agent_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_agent_opts)
def _call_agent(session, instance, vm_ref, method, addl_args=None,
@@ -66,7 +66,7 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
if addl_args is None:
addl_args = {}
if timeout is None:
- timeout = FLAGS.agent_timeout
+ timeout = CONF.agent_timeout
vm_rec = session.call_xenapi("VM.get_record", vm_ref)
@@ -137,7 +137,7 @@ class XenAPIBasedAgent(object):
# also take a while to boot. So we need to be more patient than
# normal as well as watch for domid changes
- expiration = time.time() + FLAGS.agent_version_timeout
+ expiration = time.time() + CONF.agent_version_timeout
while time.time() < expiration:
ret = _get_agent_version(self.session, self.instance, self.vm_ref)
if ret:
@@ -235,7 +235,7 @@ class XenAPIBasedAgent(object):
resp = _call_agent(
self.session, self.instance, self.vm_ref, 'resetnetwork',
- timeout=FLAGS.agent_resetnetwork_timeout)
+ timeout=CONF.agent_resetnetwork_timeout)
if resp['returncode'] != '0':
LOG.error(_('Failed to reset network: %(resp)r'), locals(),
instance=self.instance)
@@ -249,10 +249,10 @@ def find_guest_agent(base_dir):
tries to locate a guest agent at the path
specificed by agent_rel_path
"""
- if FLAGS.xenapi_disable_agent:
+ if CONF.xenapi_disable_agent:
return False
- agent_rel_path = FLAGS.xenapi_agent_path
+ agent_rel_path = CONF.xenapi_agent_path
agent_path = os.path.join(base_dir, agent_rel_path)
if os.path.isfile(agent_path):
# The presence of the guest agent
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 4d032e891..edff6934c 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -46,10 +46,9 @@ import xmlrpclib
from eventlet import queue
from eventlet import timeout
+from nova import config
from nova import context
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
@@ -118,8 +117,8 @@ xenapi_opts = [
help='Timeout in seconds for XenAPI login.'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_opts)
class XenAPIDriver(driver.ComputeDriver):
@@ -128,23 +127,23 @@ class XenAPIDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
+ url = CONF.xenapi_connection_url
+ username = CONF.xenapi_connection_username
+ password = CONF.xenapi_connection_password
if not url or password is None:
raise Exception(_('Must specify xenapi_connection_url, '
'xenapi_connection_username (optionally), and '
'xenapi_connection_password to use '
'compute_driver=xenapi.XenAPIDriver'))
- self._session = XenAPISession(url, username, password)
+ self._session = XenAPISession(url, username, password, self.virtapi)
self._volumeops = volumeops.VolumeOps(self._session)
self._host_state = None
self._host = host.Host(self._session, self.virtapi)
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
- self._pool = pool.ResourcePool(self._session)
+ self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
@@ -153,7 +152,7 @@ class XenAPIDriver(driver.ComputeDriver):
return self._host_state
def init_host(self, host):
- if FLAGS.xenapi_check_host:
+ if CONF.xenapi_check_host:
vm_utils.ensure_correct_host(self._session)
try:
@@ -368,7 +367,7 @@ class XenAPIDriver(driver.ComputeDriver):
@staticmethod
def get_host_ip_addr():
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return xs_url.netloc
def attach_volume(self, connection_info, instance_name, mountpoint):
@@ -384,17 +383,18 @@ class XenAPIDriver(driver.ComputeDriver):
mountpoint)
def get_console_pool_info(self, console_type):
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return {'address': xs_url.netloc,
- 'username': FLAGS.xenapi_connection_username,
- 'password': FLAGS.xenapi_connection_password}
+ 'username': CONF.xenapi_connection_username,
+ 'password': CONF.xenapi_connection_password}
- def get_available_resource(self):
+ def get_available_resource(self, nodename):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
+ :param nodename: ignored in this driver
:returns: dictionary describing resources
"""
@@ -615,7 +615,7 @@ class XenAPIDriver(driver.ComputeDriver):
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
- def __init__(self, url, user, pw):
+ def __init__(self, url, user, pw, virtapi):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
@@ -627,11 +627,12 @@ class XenAPISession(object):
self.host_uuid = self._get_host_uuid()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
+ self._virtapi = virtapi
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session(url)
- with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
+ with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure, e:
# if user and pw of the master are different, we're doomed!
@@ -647,21 +648,22 @@ class XenAPISession(object):
return url
def _populate_session_pool(self, url, user, pw, exception):
- for i in xrange(FLAGS.xenapi_connection_concurrent - 1):
+ for i in xrange(CONF.xenapi_connection_concurrent - 1):
session = self._create_session(url)
- with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
+ with timeout.Timeout(CONF.xenapi_login_timeout, exception):
session.login_with_password(user, pw)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
- aggr = db.aggregate_get_by_host(context.get_admin_context(),
- FLAGS.host, key=pool_states.POOL_FLAG)[0]
+ aggr = self._virtapi.aggregate_get_by_host(
+ context.get_admin_context(),
+ CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
- return aggr.metadetails[FLAGS.host]
+ return aggr.metadetails[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index f2b90c74b..48f340e99 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -17,17 +17,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import config
from nova import context
-from nova.db import api as db
-from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt import firewall
from nova.virt import netutils
-
LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
@@ -47,9 +44,9 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
json_ret = jsonutils.loads(ret)
return (json_ret['out'], json_ret['err'])
- def __init__(self, xenapi_session=None, **kwargs):
+ def __init__(self, virtapi, xenapi_session=None, **kwargs):
from nova.network import linux_net
- super(Dom0IptablesFirewallDriver, self).__init__(**kwargs)
+ super(Dom0IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
self._session = xenapi_session
# Create IpTablesManager with executor through plugin
self.iptables = linux_net.IptablesManager(self._plugin_execute)
@@ -66,8 +63,7 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
return ['--dport', '%s:%s' % (rule.from_port,
rule.to_port)]
- @staticmethod
- def _provider_rules():
+ def _provider_rules(self):
"""Generate a list of rules from provider for IP4 & IP6.
Note: We could not use the common code from virt.firewall because
XS doesn't accept the '-m multiport' option"""
@@ -75,7 +71,7 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
ctxt = context.get_admin_context()
ipv4_rules = []
ipv6_rules = []
- rules = db.provider_fw_rule_get_all(ctxt)
+ rules = self._virtapi.provider_fw_rule_get_all(ctxt)
for rule in rules:
LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
version = netutils.get_ip_version(rule['cidr'])
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 39a3b9824..30d085fe2 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -24,7 +24,6 @@ import logging
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
-from nova import db
from nova import exception
from nova import notifications
from nova.openstack.common import jsonutils
@@ -77,7 +76,15 @@ class Host(object):
instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
- dest = _host_find(ctxt, self._session, host, host_ref)
+ aggregate = self._virtapi.aggregate_get_by_host(
+ ctxt, host, key=pool_states.POOL_FLAG)
+ if not aggregate:
+ msg = _('Aggregate for host %(host)s could not be'
+ ' found.') % dict(host=host)
+ raise exception.NotFound(msg)
+
+ dest = _host_find(ctxt, self._session, aggregate[0],
+ host_ref)
(old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
@@ -222,26 +229,23 @@ def _uuid_find(virtapi, context, host, name_label):
return None
-def _host_find(context, session, src, dst):
+def _host_find(context, session, src_aggregate, dst):
"""Return the host from the xenapi host reference.
- :param src: the compute host being put in maintenance (source of VMs)
+ :param src_aggregate: the aggregate that the compute host being put in
+ maintenance (source of VMs) belongs to
:param dst: the hypervisor host reference (destination of VMs)
:return: the compute host that manages dst
"""
# NOTE: this would be a lot simpler if nova-compute stored
- # FLAGS.host in the XenServer host's other-config map.
+ # CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
- aggregate = db.aggregate_get_by_host(context, src,
- key=pool_states.POOL_FLAG)[0]
- if not aggregate:
- raise exception.AggregateHostNotFound(host=src)
uuid = session.call_xenapi('host.get_record', dst)['uuid']
- for compute_host, host_uuid in aggregate.metadetails.iteritems():
+ for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
- 'metadata': aggregate.metadetails})
+ 'metadata': src_aggregate.metadetails})
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 0c2565dbd..fe91a762a 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -22,9 +22,8 @@ Management class for Pool-related functions (join, eject, etc).
import urlparse
from nova.compute import rpcapi as compute_rpcapi
-from nova import db
+from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -40,28 +39,30 @@ xenapi_pool_opts = [
help='To use for hosts with different CPUs'),
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_pool_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_pool_opts)
class ResourcePool(object):
"""
Implements resource pool operations.
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
host_ref = session.get_xenapi_host()
host_rec = session.call_xenapi('host.get_record', host_ref)
self._host_name = host_rec['hostname']
self._host_addr = host_rec['address']
self._host_uuid = host_rec['uuid']
self._session = session
+ self._virtapi = virtapi
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _is_hv_pool(self, context, aggregate_id):
- return pool_states.is_hv_pool(context, aggregate_id)
+ return pool_states.is_hv_pool(
+ self._virtapi.aggregate_metadata_get(context, aggregate_id))
def _get_metadata(self, context, aggregate_id):
- return db.aggregate_metadata_get(context, aggregate_id)
+ return self._virtapi.aggregate_metadata_get(context, aggregate_id)
def undo_aggregate_operation(self, context, op, aggregate_id,
host, set_error):
@@ -69,7 +70,8 @@ class ResourcePool(object):
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
- db.aggregate_metadata_add(context, aggregate_id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate_id,
+ metadata)
op(context, aggregate_id, host)
except Exception:
LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
@@ -95,8 +97,9 @@ class ResourcePool(object):
if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
== pool_states.CREATED):
- db.aggregate_metadata_add(context, aggregate.id,
- {pool_states.KEY: pool_states.CHANGING})
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ {pool_states.KEY:
+ pool_states.CHANGING})
if len(aggregate.hosts) == 1:
# this is the first host of the pool -> make it master
self._init_pool(aggregate.id, aggregate.name)
@@ -104,13 +107,14 @@ class ResourcePool(object):
metadata = {'master_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
master_compute = self._get_metadata(context,
aggregate.id)['master_compute']
- if master_compute == FLAGS.host and master_compute != host:
+ if master_compute == CONF.host and master_compute != host:
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
@@ -119,7 +123,8 @@ class ResourcePool(object):
slave_info.get('url'), slave_info.get('user'),
slave_info.get('passwd'))
metadata = {host: slave_info.get('xenhost_uuid'), }
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ metadata)
elif master_compute and master_compute != host:
# send rpc cast to master, asking to add the following
# host with specified credentials.
@@ -147,12 +152,13 @@ class ResourcePool(object):
master_compute = self._get_metadata(context,
aggregate.id)['master_compute']
- if master_compute == FLAGS.host and master_compute != host:
+ if master_compute == CONF.host and master_compute != host:
# this is the master -> instruct it to eject a host from the pool
host_uuid = self._get_metadata(context, aggregate.id)[host]
self._eject_slave(aggregate.id,
slave_info.get('compute_uuid'), host_uuid)
- db.aggregate_metadata_delete(context, aggregate.id, host)
+ self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+ host)
elif master_compute == host:
# Remove master from its own pool -> destroy pool only if the
# master is on its own, otherwise raise fault. Destroying a
@@ -168,7 +174,8 @@ class ResourcePool(object):
% locals())
self._clear_pool(aggregate.id)
for key in ['master_compute', host]:
- db.aggregate_metadata_delete(context, aggregate.id, key)
+ self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+ key)
elif master_compute and master_compute != host:
# A master exists -> forward pool-eject request to master
slave_info = self._create_slave_info()
@@ -190,10 +197,10 @@ class ResourcePool(object):
'url': url,
'user': user,
'password': passwd,
- 'force': jsonutils.dumps(FLAGS.use_join_force),
+ 'force': jsonutils.dumps(CONF.use_join_force),
'master_addr': self._host_addr,
- 'master_user': FLAGS.xenapi_connection_username,
- 'master_pass': FLAGS.xenapi_connection_password, }
+ 'master_user': CONF.xenapi_connection_username,
+ 'master_pass': CONF.xenapi_connection_password, }
self._session.call_plugin('xenhost', 'host_join', args)
except self._session.XenAPI.Failure as e:
LOG.error(_("Pool-Join failed: %(e)s") % locals())
@@ -249,12 +256,12 @@ class ResourcePool(object):
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
sender_url = swap_xapi_host(
- FLAGS.xenapi_connection_url, self._host_addr)
+ CONF.xenapi_connection_url, self._host_addr)
return {
"url": sender_url,
- "user": FLAGS.xenapi_connection_username,
- "passwd": FLAGS.xenapi_connection_password,
+ "user": CONF.xenapi_connection_username,
+ "passwd": CONF.xenapi_connection_password,
"compute_uuid": vm_utils.get_this_vm_uuid(),
"xenhost_uuid": self._host_uuid,
}
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index 82a85ce14..e17a4ab94 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -36,7 +36,6 @@ an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
that is, in order to recover from it an pool must be deleted.
"""
-from nova import db
CREATED = 'created'
CHANGING = 'changing'
@@ -49,7 +48,6 @@ KEY = 'operational_state'
POOL_FLAG = 'hypervisor_pool'
-def is_hv_pool(context, aggregate_id):
+def is_hv_pool(metadata):
"""Checks if aggregate is a hypervisor_pool"""
- metadata = db.aggregate_metadata_get(context, aggregate_id)
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 6d943804d..e0b7f8e07 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -19,7 +19,7 @@
"""VIF drivers for XenAPI."""
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import vif
@@ -31,8 +31,8 @@ xenapi_ovs_integration_bridge_opt = cfg.StrOpt('xenapi_ovs_integration_bridge',
default='xapi1',
help='Name of Integration Bridge used by Open vSwitch')
-FLAGS = flags.FLAGS
-FLAGS.register_opt(xenapi_ovs_integration_bridge_opt)
+CONF = config.CONF
+CONF.register_opt(xenapi_ovs_integration_bridge_opt)
LOG = logging.getLogger(__name__)
@@ -76,7 +76,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
- bridge_interface = (FLAGS.vlan_interface or
+ bridge_interface = (CONF.vlan_interface or
network.get_meta('bridge_interface'))
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
@@ -144,7 +144,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# with OVS model, always plug into an OVS integration bridge
# that is already created
network_ref = network_utils.find_network_with_bridge(
- self._session, FLAGS.xenapi_ovs_integration_bridge)
+ self._session, CONF.xenapi_ovs_integration_bridge)
vif_rec = {}
vif_rec['device'] = str(device)
vif_rec['network'] = network_ref
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 02fd9188f..5bf4e1148 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -37,9 +37,8 @@ from eventlet import greenthread
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
-from nova import db
+from nova import config
from nova import exception
-from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
@@ -116,8 +115,8 @@ xenapi_vm_utils_opts = [
' within a given dom0. (-1 = no limit)')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_vm_utils_opts)
+CONF = config.CONF
+CONF.register_opts(xenapi_vm_utils_opts)
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -180,6 +179,13 @@ class ImageType(object):
}.get(image_type_id)
+def _system_metadata_to_dict(system_metadata):
+ result = {}
+ for item in system_metadata:
+ result[item['key']] = item['value']
+ return result
+
+
def create_vm(session, instance, name_label, kernel, ramdisk,
use_pv_kernel=False):
"""Create a VM record. Returns new VM reference.
@@ -269,22 +275,42 @@ def destroy_vm(session, instance, vm_ref):
LOG.debug(_("VM destroyed"), instance=instance)
-def shutdown_vm(session, instance, vm_ref, hard=True):
- vm_rec = session.call_xenapi("VM.get_record", vm_ref)
- state = compile_info(vm_rec)['state']
- if state == power_state.SHUTDOWN:
+def clean_shutdown_vm(session, instance, vm_ref):
+ if _is_vm_shutdown(session, vm_ref):
LOG.warn(_("VM already halted, skipping shutdown..."),
instance=instance)
- return
+ return False
- LOG.debug(_("Shutting down VM"), instance=instance)
+ LOG.debug(_("Shutting down VM (cleanly)"), instance=instance)
try:
- if hard:
- session.call_xenapi('VM.hard_shutdown', vm_ref)
- else:
- session.call_xenapi('VM.clean_shutdown', vm_ref)
+ session.call_xenapi('VM.clean_shutdown', vm_ref)
except session.XenAPI.Failure, exc:
LOG.exception(exc)
+ return False
+ return True
+
+
+def hard_shutdown_vm(session, instance, vm_ref):
+ if _is_vm_shutdown(session, vm_ref):
+ LOG.warn(_("VM already halted, skipping shutdown..."),
+ instance=instance)
+ return False
+
+ LOG.debug(_("Shutting down VM (hard)"), instance=instance)
+ try:
+ session.call_xenapi('VM.hard_shutdown', vm_ref)
+ except session.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ return False
+ return True
+
+
+def _is_vm_shutdown(session, vm_ref):
+ vm_rec = session.call_xenapi("VM.get_record", vm_ref)
+ state = compile_info(vm_rec)['state']
+ if state == power_state.SHUTDOWN:
+ return True
+ return False
def ensure_free_mem(session, instance):
@@ -318,7 +344,7 @@ def unplug_vbd(session, vbd_ref):
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
# using the device should be dead.
- max_attempts = FLAGS.xenapi_num_vbd_unplug_retries + 1
+ max_attempts = CONF.xenapi_num_vbd_unplug_retries + 1
for num_attempt in xrange(1, max_attempts + 1):
try:
session.call_xenapi('VBD.unplug', vbd_ref)
@@ -609,7 +635,7 @@ def get_sr_path(session):
sr_ref = safe_find_sr(session)
sr_rec = session.call_xenapi("SR.get_record", sr_ref)
sr_uuid = sr_rec["uuid"]
- return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+ return os.path.join(CONF.xenapi_sr_base_path, sr_uuid)
def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
@@ -695,7 +721,7 @@ def upload_image(context, session, instance, vdi_uuids, image_id):
properties = {
'auto_disk_config': instance['auto_disk_config'],
- 'os_type': instance['os_type'] or FLAGS.default_os_type,
+ 'os_type': instance['os_type'] or CONF.default_os_type,
}
params = {'vdi_uuids': vdi_uuids,
@@ -831,7 +857,7 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
size_gb):
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'ephemeral', size_gb * 1024,
- FLAGS.default_ephemeral_format)
+ CONF.default_ephemeral_format)
def create_kernel_image(context, session, instance, name_label, image_id,
@@ -842,7 +868,7 @@ def create_kernel_image(context, session, instance, name_label, image_id,
Returns: A list of dictionaries that describe VDIs
"""
filename = ""
- if FLAGS.cache_images:
+ if CONF.cache_images:
args = {}
args['cached-image'] = image_id
args['new-image-uuid'] = str(uuid.uuid4())
@@ -872,7 +898,7 @@ def _create_cached_image(context, session, instance, name_label,
sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
vdis = {}
- if FLAGS.use_cow_images and sr_type != "ext":
+ if CONF.use_cow_images and sr_type != "ext":
LOG.warning(_("Fast cloning is only supported on default local SR "
"of type ext. SR on this system was found to be of "
"type %(sr_type)s. Ignoring the cow flag.")
@@ -890,7 +916,7 @@ def _create_cached_image(context, session, instance, name_label,
session.call_xenapi('VDI.add_to_other_config',
root_vdi_ref, 'image-id', str(image_id))
- if FLAGS.use_cow_images and sr_type == 'ext':
+ if CONF.use_cow_images and sr_type == 'ext':
new_vdi_ref = _clone_vdi(session, root_vdi_ref)
else:
new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)
@@ -913,7 +939,7 @@ def _create_image(context, session, instance, name_label, image_id,
Returns: A list of dictionaries that describe VDIs
"""
- cache_images = FLAGS.cache_images.lower()
+ cache_images = CONF.cache_images.lower()
# Deterimine if the image is cacheable
if image_type == ImageType.DISK_ISO:
@@ -921,9 +947,7 @@ def _create_image(context, session, instance, name_label, image_id,
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
- # FIXME(sirp): This should be eager loaded like instance metadata
- sys_meta = db.instance_system_metadata_get(context,
- instance['uuid'])
+ sys_meta = _system_metadata_to_dict(instance['system_metadata'])
try:
cache = utils.bool_from_str(sys_meta['image_cache_in_nova'])
except KeyError:
@@ -932,7 +956,7 @@ def _create_image(context, session, instance, name_label, image_id,
cache = False
else:
LOG.warning(_("Unrecognized cache_images value '%s', defaulting to"
- " True"), FLAGS.cache_images)
+ " True"), CONF.cache_images)
cache = True
# Fetch (and cache) the image
@@ -974,7 +998,7 @@ def _fetch_image(context, session, instance, name_label, image_id, image_type):
def _fetch_using_dom0_plugin_with_retry(context, session, image_id,
plugin_name, params, callback=None):
- max_attempts = FLAGS.glance_num_retries + 1
+ max_attempts = CONF.glance_num_retries + 1
sleep_time = 0.5
for attempt_num in xrange(1, max_attempts + 1):
LOG.info(_('download_vhd %(image_id)s, '
@@ -1011,14 +1035,12 @@ def _make_uuid_stack():
def _image_uses_bittorrent(context, instance):
bittorrent = False
- xenapi_torrent_images = FLAGS.xenapi_torrent_images.lower()
+ xenapi_torrent_images = CONF.xenapi_torrent_images.lower()
if xenapi_torrent_images == 'all':
bittorrent = True
elif xenapi_torrent_images == 'some':
- # FIXME(sirp): This should be eager loaded like instance metadata
- sys_meta = db.instance_system_metadata_get(context,
- instance['uuid'])
+ sys_meta = _system_metadata_to_dict(instance['system_metadata'])
try:
bittorrent = utils.bool_from_str(sys_meta['image_bittorrent'])
except KeyError:
@@ -1047,19 +1069,19 @@ def _fetch_vhd_image(context, session, instance, image_id):
if _image_uses_bittorrent(context, instance):
plugin_name = 'bittorrent'
callback = None
- params['torrent_base_url'] = FLAGS.xenapi_torrent_base_url
- params['torrent_seed_duration'] = FLAGS.xenapi_torrent_seed_duration
- params['torrent_seed_chance'] = FLAGS.xenapi_torrent_seed_chance
+ params['torrent_base_url'] = CONF.xenapi_torrent_base_url
+ params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
+ params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
params['torrent_max_last_accessed'] =\
- FLAGS.xenapi_torrent_max_last_accessed
+ CONF.xenapi_torrent_max_last_accessed
params['torrent_listen_port_start'] =\
- FLAGS.xenapi_torrent_listen_port_start
+ CONF.xenapi_torrent_listen_port_start
params['torrent_listen_port_end'] =\
- FLAGS.xenapi_torrent_listen_port_end
+ CONF.xenapi_torrent_listen_port_end
params['torrent_download_stall_cutoff'] =\
- FLAGS.xenapi_torrent_download_stall_cutoff
+ CONF.xenapi_torrent_download_stall_cutoff
params['torrent_max_seeder_processes_per_host'] =\
- FLAGS.xenapi_torrent_max_seeder_processes_per_host
+ CONF.xenapi_torrent_max_seeder_processes_per_host
else:
plugin_name = 'glance'
glance_api_servers = glance.get_api_servers()
@@ -1162,8 +1184,8 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and
- vdi_size > FLAGS.max_kernel_ramdisk_size):
- max_size = FLAGS.max_kernel_ramdisk_size
+ vdi_size > CONF.max_kernel_ramdisk_size):
+ max_size = CONF.max_kernel_ramdisk_size
raise exception.NovaException(
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") % locals())
@@ -1192,7 +1214,7 @@ def _fetch_disk_image(context, session, instance, name_label, image_id,
# Let the plugin copy the correct number of bytes.
args['image-size'] = str(vdi_size)
- if FLAGS.cache_images:
+ if CONF.cache_images:
args['cached-image'] = image_id
filename = session.call_plugin('kernel', 'copy_vdi', args)
@@ -1461,13 +1483,13 @@ def _find_sr(session):
"""Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
try:
- tokens = FLAGS.sr_matching_filter.split(':')
+ tokens = CONF.sr_matching_filter.split(':')
filter_criteria = tokens[0]
filter_pattern = tokens[1]
except IndexError:
# oops, flag is invalid
LOG.warning(_("Flag sr_matching_filter '%s' does not respect "
- "formatting convention"), FLAGS.sr_matching_filter)
+ "formatting convention"), CONF.sr_matching_filter)
return None
if filter_criteria == 'other-config':
@@ -1535,7 +1557,7 @@ def _find_iso_sr(session):
def _get_rrd_server():
"""Return server's scheme and address to use for retrieving RRD XMLs."""
- xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ xs_url = urlparse.urlparse(CONF.xenapi_connection_url)
return [xs_url.scheme, xs_url.netloc]
@@ -1544,8 +1566,8 @@ def _get_rrd(server, vm_uuid):
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
- FLAGS.xenapi_connection_username,
- FLAGS.xenapi_connection_password,
+ CONF.xenapi_connection_username,
+ CONF.xenapi_connection_password,
server[1],
vm_uuid))
return xml.read()
@@ -1560,8 +1582,8 @@ def _get_rrd_updates(server, start_time):
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
- FLAGS.xenapi_connection_username,
- FLAGS.xenapi_connection_password,
+ CONF.xenapi_connection_username,
+ CONF.xenapi_connection_password,
server[1],
start_time))
return xml.read()
@@ -1762,7 +1784,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
# matches the underlying VHDs.
_scan_sr(session, sr_ref)
- max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
+ max_attempts = CONF.xenapi_vhd_coalesce_max_attempts
for i in xrange(max_attempts):
_scan_sr(session, sr_ref)
parent_uuid = _get_vhd_parent_uuid(session, vdi_ref)
@@ -1775,7 +1797,7 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
return parent_uuid, base_uuid
- greenthread.sleep(FLAGS.xenapi_vhd_coalesce_poll_interval)
+ greenthread.sleep(CONF.xenapi_vhd_coalesce_poll_interval)
msg = (_("VHD coalesce attempts exceeded (%(max_attempts)d)"
", giving up...") % locals())
@@ -1792,12 +1814,12 @@ def _remap_vbd_dev(dev):
For now, we work around it by just doing a string replace.
"""
# NOTE(sirp): This hack can go away when we pull support for Maverick
- should_remap = FLAGS.xenapi_remap_vbd_dev
+ should_remap = CONF.xenapi_remap_vbd_dev
if not should_remap:
return dev
old_prefix = 'xvd'
- new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
+ new_prefix = CONF.xenapi_remap_vbd_dev_prefix
remapped_dev = dev.replace(old_prefix, new_prefix)
return remapped_dev
@@ -1805,7 +1827,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
"""Wait for device node to appear"""
- for i in xrange(0, FLAGS.block_device_creation_timeout):
+ for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
return
@@ -2047,7 +2069,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
_write_partition(virtual_size, dst)
- if FLAGS.xenapi_sparse_copy:
+ if CONF.xenapi_sparse_copy:
_sparse_copy(src_path, dst_path, virtual_size)
else:
num_blocks = virtual_size / SECTOR_SIZE
@@ -2102,7 +2124,7 @@ def _prepare_injectables(inst, network_info):
#only if injection is performed
from Cheetah import Template as t
template = t.Template
- template_data = open(FLAGS.injected_network_template).read()
+ template_data = open(CONF.injected_network_template).read()
metadata = inst['metadata']
key = str(inst['key_data'])
@@ -2137,7 +2159,7 @@ def _prepare_injectables(inst, network_info):
'address_v6': '',
'netmask_v6': '',
'gateway_v6': '',
- 'use_ipv6': FLAGS.use_ipv6}
+ 'use_ipv6': CONF.use_ipv6}
# NOTE(tr3buchet): the original code used the old network_info
# which only supported a single ipv4 subnet
@@ -2187,7 +2209,7 @@ def _prepare_injectables(inst, network_info):
if interfaces_info:
net = str(template(template_data,
searchList=[{'interfaces': interfaces_info,
- 'use_ipv6': FLAGS.use_ipv6}]))
+ 'use_ipv6': CONF.use_ipv6}]))
return key, net, metadata
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 915c45243..cbbd15e1a 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -32,9 +32,7 @@ from nova.compute import vm_mode
from nova.compute import vm_states
from nova import config
from nova import context as nova_context
-from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
@@ -61,10 +59,8 @@ xenapi_vmops_opts = [
help='The XenAPI VIF driver using XenServer Network APIs.')
]
-FLAGS = flags.FLAGS
-FLAGS.register_opts(xenapi_vmops_opts)
-
CONF = config.CONF
+CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
@@ -153,15 +149,16 @@ class VMOps(object):
self._virtapi = virtapi
self.poll_rescue_last_ran = None
self.firewall_driver = firewall.load_driver(
- default=DEFAULT_FIREWALL_DRIVER,
+ DEFAULT_FIREWALL_DRIVER,
+ self._virtapi,
xenapi_session=self._session)
- vif_impl = importutils.import_class(FLAGS.xenapi_vif_driver)
+ vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
@property
def agent_enabled(self):
- return not FLAGS.xenapi_disable_agent
+ return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
@@ -240,10 +237,10 @@ class VMOps(object):
False, False)
def _create_disks(self, context, instance, name_label, disk_image_type,
- block_device_info=None):
+ image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
- instance['image_ref'],
+ image_meta['id'],
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
@@ -271,9 +268,10 @@ class VMOps(object):
return vm_utils.determine_disk_image_type(image_meta)
@step
- def create_disks_step(undo_mgr, disk_image_type):
+ def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
- disk_image_type, block_device_info)
+ disk_image_type, image_meta,
+ block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
@@ -389,7 +387,7 @@ class VMOps(object):
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
- vdis = create_disks_step(undo_mgr, disk_image_type)
+ vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
@@ -424,7 +422,7 @@ class VMOps(object):
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
- if FLAGS.flat_injected:
+ if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
@@ -529,7 +527,7 @@ class VMOps(object):
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
- expiration = time.time() + FLAGS.xenapi_running_timeout
+ expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
@@ -538,7 +536,7 @@ class VMOps(object):
greenthread.sleep(0.5)
if self.agent_enabled:
- agent_build = db.agent_build_get_by_triple(
+ agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
@@ -694,8 +692,10 @@ class VMOps(object):
instance=instance)
# 2. Power down the instance before resizing
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
+ if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
+ LOG.debug(_("Clean shutdown did not complete successfully, "
+ "trying hard shutdown."), instance=instance)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
@@ -742,8 +742,10 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
+ if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
+ LOG.debug(_("Clean shutdown did not complete successfully, "
+ "trying hard shutdown."), instance=instance)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
@@ -1076,7 +1078,7 @@ class VMOps(object):
instance=instance)
return
- vm_utils.shutdown_vm(self._session, instance, vm_ref)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
# Destroy VDIs
self._detach_vm_vols(instance, vm_ref, block_device_info)
@@ -1127,7 +1129,7 @@ class VMOps(object):
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
@@ -1160,7 +1162,7 @@ class VMOps(object):
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
@@ -1172,7 +1174,7 @@ class VMOps(object):
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
- vm_utils.shutdown_vm(self._session, instance, vm_ref, hard=True)
+ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
@@ -1317,7 +1319,7 @@ class VMOps(object):
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
# NOTE: XS5.6sp2+ use http over port 80 for xenapi com
- return {'host': FLAGS.vncserver_proxyclient_address, 'port': 80,
+ return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
@@ -1540,10 +1542,10 @@ class VMOps(object):
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
- current_aggregate = db.aggregate_get_by_host(context, FLAGS.host,
- key=pool_states.POOL_FLAG)[0]
+ current_aggregate = self._virtapi.aggregate_get_by_host(
+ context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
- raise exception.AggregateHostNotFound(host=FLAGS.host)
+ raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 7de29bf24..1ca3ea21f 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -22,11 +22,11 @@ and storage repositories
import re
import string
-from nova import flags
+from nova import config
from nova.openstack.common import log as logging
-FLAGS = flags.FLAGS
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -359,22 +359,22 @@ def _get_target_host(iscsi_string):
"""Retrieve target host"""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
- elif iscsi_string is None or FLAGS.target_host:
- return FLAGS.target_host
+ elif iscsi_string is None or CONF.target_host:
+ return CONF.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port"""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
- elif iscsi_string is None or FLAGS.target_port:
- return FLAGS.target_port
+ elif iscsi_string is None or CONF.target_port:
+ return CONF.target_port
def _get_iqn(iscsi_string, id):
"""Retrieve target IQN"""
if iscsi_string:
return iscsi_string
- elif iscsi_string is None or FLAGS.iqn_prefix:
+ elif iscsi_string is None or CONF.iqn_prefix:
volume_id = _get_volume_id(id)
- return '%s:%s' % (FLAGS.iqn_prefix, volume_id)
+ return '%s:%s' % (CONF.iqn_prefix, volume_id)
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
index 29ef4f248..ffc93abcd 100644
--- a/nova/vnc/__init__.py
+++ b/nova/vnc/__init__.py
@@ -19,7 +19,6 @@
"""Module for VNC Proxying."""
from nova import config
-from nova import flags
from nova.openstack.common import cfg
diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py
index e3a1595d1..947b91d11 100644
--- a/nova/vnc/xvp_proxy.py
+++ b/nova/vnc/xvp_proxy.py
@@ -29,7 +29,6 @@ import eventlet.wsgi
from nova import config
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import version
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index 1eedd199d..cc7dcf4d9 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -19,7 +19,6 @@
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.volume import <foo>' elsewhere.
import nova.config
-import nova.flags
import nova.openstack.common.importutils
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 45a2b1693..67e303e01 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -27,7 +27,6 @@ from cinderclient.v1 import client as cinder_client
from nova import config
from nova.db import base
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
deleted file mode 100644
index 07bbbde6c..000000000
--- a/nova/volume/driver.py
+++ /dev/null
@@ -1,954 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Drivers for volumes.
-
-"""
-
-import os
-import tempfile
-import time
-import urllib
-
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.volume import iscsi
-
-
-LOG = logging.getLogger(__name__)
-
-volume_opts = [
- cfg.StrOpt('volume_group',
- default='nova-volumes',
- help='Name for the VG that will contain exported volumes'),
- cfg.IntOpt('num_shell_tries',
- default=3,
- help='number of times to attempt to run flakey shell commands'),
- cfg.IntOpt('num_iscsi_scan_tries',
- default=3,
- help='number of times to rescan iSCSI target to find volume'),
- cfg.IntOpt('iscsi_num_targets',
- default=100,
- help='Number of iscsi target ids per host'),
- cfg.StrOpt('iscsi_target_prefix',
- default='iqn.2010-10.org.openstack:',
- help='prefix for iscsi volumes'),
- cfg.StrOpt('iscsi_ip_address',
- default='$my_ip',
- help='use this ip for iscsi'),
- cfg.IntOpt('iscsi_port',
- default=3260,
- help='The port that the iSCSI daemon is listening on'),
- cfg.StrOpt('rbd_pool',
- default='rbd',
- help='the RADOS pool in which rbd volumes are stored'),
- cfg.StrOpt('rbd_user',
- default=None,
- help='the RADOS client name for accessing rbd volumes'),
- cfg.StrOpt('rbd_secret_uuid',
- default=None,
- help='the libvirt uuid of the secret for the rbd_user'
- 'volumes'),
- cfg.StrOpt('volume_tmp_dir',
- default=None,
- help='where to store temporary image files if the volume '
- 'driver does not write them directly to the volume'),
- ]
-
-CONF = config.CONF
-CONF.register_opts(volume_opts)
-
-
-class VolumeDriver(object):
- """Executes commands relating to Volumes."""
- def __init__(self, execute=utils.execute, *args, **kwargs):
- # NOTE(vish): db is set by Manager
- self.db = None
- self.set_execute(execute)
-
- def set_execute(self, execute):
- self._execute = execute
-
- def _try_execute(self, *command, **kwargs):
- # NOTE(vish): Volume commands can partially fail due to timing, but
- # running them a second time on failure will usually
- # recover nicely.
- tries = 0
- while True:
- try:
- self._execute(*command, **kwargs)
- return True
- except exception.ProcessExecutionError:
- tries = tries + 1
- if tries >= CONF.num_shell_tries:
- raise
- LOG.exception(_("Recovering from a failed execute. "
- "Try number %s"), tries)
- time.sleep(tries ** 2)
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- out, err = self._execute('vgs', '--noheadings', '-o', 'name',
- run_as_root=True)
- volume_groups = out.split()
- if not CONF.volume_group in volume_groups:
- exception_message = (_("volume group %s doesn't exist")
- % CONF.volume_group)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def _create_volume(self, volume_name, sizestr):
- self._try_execute('lvcreate', '-L', sizestr, '-n',
- volume_name, CONF.volume_group, run_as_root=True)
-
- def _copy_volume(self, srcstr, deststr, size_in_g):
- # Use O_DIRECT to avoid thrashing the system buffer cache
- direct_flags = ('iflag=direct', 'oflag=direct')
-
- # Check whether O_DIRECT is supported
- try:
- self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
- *direct_flags, run_as_root=True)
- except exception.ProcessExecutionError:
- direct_flags = ()
-
- # Perform the copy
- self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
- 'count=%d' % (size_in_g * 1024), 'bs=1M',
- *direct_flags, run_as_root=True)
-
- def _volume_not_present(self, volume_name):
- path_name = '%s/%s' % (CONF.volume_group, volume_name)
- try:
- self._try_execute('lvdisplay', path_name, run_as_root=True)
- except Exception as e:
- # If the volume isn't present
- return True
- return False
-
- def _delete_volume(self, volume, size_in_g):
- """Deletes a logical volume."""
- # zero out old volumes to prevent data leaking between users
- # TODO(ja): reclaiming space should be done lazy and low priority
- self._copy_volume('/dev/zero', self.local_path(volume), size_in_g)
- dev_path = self.local_path(volume)
- if os.path.exists(dev_path):
- self._try_execute('dmsetup', 'remove', '-f', dev_path,
- run_as_root=True)
- self._try_execute('lvremove', '-f', "%s/%s" %
- (CONF.volume_group,
- self._escape_snapshot(volume['name'])),
- run_as_root=True)
-
- def _sizestr(self, size_in_g):
- if int(size_in_g) == 0:
- return '100M'
- return '%sG' % size_in_g
-
- # Linux LVM reserves name that starts with snapshot, so that
- # such volume name can't be created. Mangle it.
- def _escape_snapshot(self, snapshot_name):
- if not snapshot_name.startswith('snapshot'):
- return snapshot_name
- return '_' + snapshot_name
-
- def create_volume(self, volume):
- """Creates a logical volume. Can optionally return a Dictionary of
- changes to the volume object to be persisted."""
- self._create_volume(volume['name'], self._sizestr(volume['size']))
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- self._create_volume(volume['name'], self._sizestr(volume['size']))
- self._copy_volume(self.local_path(snapshot), self.local_path(volume),
- snapshot['volume_size'])
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
- if self._volume_not_present(volume['name']):
- # If the volume isn't present, then don't attempt to delete
- return True
-
- # TODO(yamahata): lvm can't delete origin volume only without
- # deleting derived snapshots. Can we do something fancy?
- out, err = self._execute('lvdisplay', '--noheading',
- '-C', '-o', 'Attr',
- '%s/%s' % (CONF.volume_group,
- volume['name']),
- run_as_root=True)
- # fake_execute returns None resulting unit test error
- if out:
- out = out.strip()
- if (out[0] == 'o') or (out[0] == 'O'):
- raise exception.VolumeIsBusy(volume_name=volume['name'])
-
- self._delete_volume(volume, volume['size'])
-
- def create_snapshot(self, snapshot):
- """Creates a snapshot."""
- orig_lv_name = "%s/%s" % (CONF.volume_group, snapshot['volume_name'])
- self._try_execute('lvcreate', '-L',
- self._sizestr(snapshot['volume_size']),
- '--name', self._escape_snapshot(snapshot['name']),
- '--snapshot', orig_lv_name, run_as_root=True)
-
- def delete_snapshot(self, snapshot):
- """Deletes a snapshot."""
- if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
- # If the snapshot isn't present, then don't attempt to delete
- return True
-
- # TODO(yamahata): zeroing out the whole snapshot triggers COW.
- # it's quite slow.
- self._delete_volume(snapshot, snapshot['volume_size'])
-
- def local_path(self, volume):
- # NOTE(vish): stops deprecation warning
- escaped_group = CONF.volume_group.replace('-', '--')
- escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
- return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- raise NotImplementedError()
-
- def create_export(self, context, volume):
- """Exports the volume. Can optionally return a Dictionary of changes
- to the volume object to be persisted."""
- raise NotImplementedError()
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
- raise NotImplementedError()
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- raise NotImplementedError()
-
- def initialize_connection(self, volume, connector):
- """Allow connection to connector and return connection info."""
- raise NotImplementedError()
-
- def terminate_connection(self, volume, connector):
- """Disallow connection from connector"""
- raise NotImplementedError()
-
- def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
- """ Callback for volume attached to instance."""
- pass
-
- def detach_volume(self, context, volume_id):
- """ Callback for volume detached."""
- pass
-
- def get_volume_stats(self, refresh=False):
- """Return the current state of the volume service. If 'refresh' is
- True, run the update first."""
- return None
-
- def do_setup(self, context):
- """Any initialization the volume driver does while starting"""
- pass
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- raise NotImplementedError()
-
- def copy_volume_to_image(self, context, volume, image_service, image_id):
- """Copy the volume to the specified image."""
- raise NotImplementedError()
-
- def clone_image(self, volume, image_location):
- """Create a volume efficiently from an existing image.
-
- image_location is a string whose format depends on the
- image service backend in use. The driver should use it
- to determine whether cloning is possible.
-
- Returns a boolean indicating whether cloning occurred
- """
- return False
-
-
-class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes.
-
- We make use of model provider properties as follows:
-
- ``provider_location``
- if present, contains the iSCSI target information in the same
- format as an ietadm discovery
- i.e. '<ip>:<port>,<portal> <target IQN>'
-
- ``provider_auth``
- if present, contains a space-separated triple:
- '<auth method> <auth username> <auth password>'.
- `CHAP` is the only auth_method in use at the moment.
- """
-
- def __init__(self, *args, **kwargs):
- self.tgtadm = iscsi.get_target_admin()
- super(ISCSIDriver, self).__init__(*args, **kwargs)
-
- def set_execute(self, execute):
- super(ISCSIDriver, self).set_execute(execute)
- self.tgtadm.set_execute(execute)
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
- except exception.NotFound:
- LOG.info(_("Skipping ensure_export. No iscsi_target "
- "provisioned for volume: %s"), volume['id'])
- return
- else:
- iscsi_target = 1 # dummy value when using TgtAdm
-
- iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
- volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
-
- # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
- # should clean this all up at some point in the future
- self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
- 0, volume_path,
- check_exit_code=False)
-
- def _ensure_iscsi_targets(self, context, host):
- """Ensure that target ids have been created in datastore."""
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
- host)
- if host_iscsi_targets >= CONF.iscsi_num_targets:
- return
-
- # NOTE(vish): Target ids start at 1, not 0.
- for target_num in xrange(1, CONF.iscsi_num_targets + 1):
- target = {'host': host, 'target_num': target_num}
- self.db.iscsi_target_create_safe(context, target)
-
- def create_export(self, context, volume):
- """Creates an export for a logical volume."""
- #BOOKMARK(jdg)
-
- iscsi_name = "%s%s" % (CONF.iscsi_target_prefix, volume['name'])
- volume_path = "/dev/%s/%s" % (CONF.volume_group, volume['name'])
-
- model_update = {}
-
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- lun = 0
- self._ensure_iscsi_targets(context, volume['host'])
- iscsi_target = self.db.volume_allocate_iscsi_target(context,
- volume['id'],
- volume['host'])
- else:
- lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1
- iscsi_target = 0 # NOTE(jdg): Not used by tgtadm
-
- # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
- # should clean this all up at some point in the future
- tid = self.tgtadm.create_iscsi_target(iscsi_name,
- iscsi_target,
- 0,
- volume_path)
- model_update['provider_location'] = _iscsi_location(
- CONF.iscsi_ip_address, tid, iscsi_name, lun)
- return model_update
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume."""
-
- # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- try:
- iscsi_target = self.db.volume_get_iscsi_target_num(context,
- volume['id'])
- except exception.NotFound:
- LOG.info(_("Skipping remove_export. No iscsi_target "
- "provisioned for volume: %s"), volume['id'])
- return
- else:
- iscsi_target = 0
-
- try:
-
- # NOTE: provider_location may be unset if the volume hasn't
- # been exported
- location = volume['provider_location'].split(' ')
- iqn = location[1]
-
- # ietadm show will exit with an error
- # this export has already been removed
- self.tgtadm.show_target(iscsi_target, iqn=iqn)
- except Exception as e:
- LOG.info(_("Skipping remove_export. No iscsi_target "
- "is presently exported for volume: %s"), volume['id'])
- return
-
- self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])
-
- def _do_iscsi_discovery(self, volume):
- #TODO(justinsb): Deprecate discovery and use stored info
- #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
- LOG.warn(_("ISCSI provider_location not stored, using discovery"))
-
- volume_name = volume['name']
-
- (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
- '-t', 'sendtargets', '-p', volume['host'],
- run_as_root=True)
- for target in out.splitlines():
- if CONF.iscsi_ip_address in target and volume_name in target:
- return target
- return None
-
- def _get_iscsi_properties(self, volume):
- """Gets iscsi configuration
-
- We ideally get saved information in the volume entity, but fall back
- to discovery if need be. Discovery may be completely removed in future
- The properties are:
-
- :target_discovered: boolean indicating whether discovery was used
-
- :target_iqn: the IQN of the iSCSI target
-
- :target_portal: the portal of the iSCSI target
-
- :target_lun: the lun of the iSCSI target
-
- :volume_id: the id of the volume (currently used by xen)
-
- :auth_method:, :auth_username:, :auth_password:
-
- the authentication details. Right now, either auth_method is not
- present meaning no authentication, or auth_method == `CHAP`
- meaning use CHAP with the specified credentials.
- """
-
- properties = {}
-
- location = volume['provider_location']
-
- if location:
- # provider_location is the same format as iSCSI discovery output
- properties['target_discovered'] = False
- else:
- location = self._do_iscsi_discovery(volume)
-
- if not location:
- raise exception.InvalidVolume(_("Could not find iSCSI export "
- " for volume %s") %
- (volume['name']))
-
- LOG.debug(_("ISCSI Discovery: Found %s") % (location))
- properties['target_discovered'] = True
-
- results = location.split(" ")
- properties['target_portal'] = results[0].split(",")[0]
- properties['target_iqn'] = results[1]
- try:
- properties['target_lun'] = int(results[2])
- except (IndexError, ValueError):
- if CONF.iscsi_helper == 'tgtadm':
- properties['target_lun'] = 1
- else:
- properties['target_lun'] = 0
-
- properties['volume_id'] = volume['id']
-
- auth = volume['provider_auth']
- if auth:
- (auth_method, auth_username, auth_secret) = auth.split()
-
- properties['auth_method'] = auth_method
- properties['auth_username'] = auth_username
- properties['auth_password'] = auth_secret
-
- return properties
-
- def _run_iscsiadm(self, iscsi_properties, iscsi_command):
- (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
- iscsi_properties['target_iqn'],
- '-p', iscsi_properties['target_portal'],
- *iscsi_command, run_as_root=True)
- LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
- (iscsi_command, out, err))
- return (out, err)
-
- def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
- iscsi_command = ('--op', 'update', '-n', property_key,
- '-v', property_value)
- return self._run_iscsiadm(iscsi_properties, iscsi_command)
-
- def initialize_connection(self, volume, connector):
- """Initializes the connection and returns connection info.
-
- The iscsi driver returns a driver_volume_type of 'iscsi'.
- The format of the driver data is defined in _get_iscsi_properties.
- Example return value::
-
- {
- 'driver_volume_type': 'iscsi'
- 'data': {
- 'target_discovered': True,
- 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
- 'target_portal': '127.0.0.0.1:3260',
- 'volume_id': 1,
- }
- }
-
- """
-
- iscsi_properties = self._get_iscsi_properties(volume)
- return {
- 'driver_volume_type': 'iscsi',
- 'data': iscsi_properties
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- vol_uuid_file = 'volume-%s' % volume_id
- volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
- if os.path.isfile(volume_path):
- iqn = '%s%s' % (CONF.iscsi_target_prefix,
- vol_uuid_file)
- else:
- raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)
-
- # TODO(jdg): In the future move all of the dependent stuff into the
- # cooresponding target admin class
- if not isinstance(self.tgtadm, iscsi.TgtAdm):
- tid = self.db.volume_get_iscsi_target_num(context, volume_id)
- else:
- tid = 0
-
- try:
- self.tgtadm.show_target(tid, iqn=iqn)
- except exception.ProcessExecutionError, e:
- # Instances remount read-only in this case.
- # /etc/init.d/iscsitarget restart and rebooting nova-volume
- # is better since ensure_export() works at boot time.
- LOG.error(_("Cannot confirm exported volume "
- "id:%(volume_id)s.") % locals())
- raise
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- """Fetch the image from image_service and write it to the volume."""
- volume_path = self.local_path(volume)
- with utils.temporary_chown(volume_path):
- with utils.file_open(volume_path, "wb") as image_file:
- image_service.download(context, image_id, image_file)
-
- def copy_volume_to_image(self, context, volume, image_service, image_id):
- """Copy the volume to the specified image."""
- volume_path = self.local_path(volume)
- with utils.temporary_chown(volume_path):
- with utils.file_open(volume_path) as volume_file:
- image_service.update(context, image_id, {}, volume_file)
-
-
-class FakeISCSIDriver(ISCSIDriver):
- """Logs calls instead of executing."""
- def __init__(self, *args, **kwargs):
- super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
- *args, **kwargs)
-
- def check_for_setup_error(self):
- """No setup necessary in fake mode."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'iscsi',
- 'data': {}
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- @staticmethod
- def fake_execute(cmd, *_args, **_kwargs):
- """Execute that simply logs the command."""
- LOG.debug(_("FAKE ISCSI: %s"), cmd)
- return (None, None)
-
-
-class RBDDriver(VolumeDriver):
- """Implements RADOS block device (RBD) volume commands"""
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- (stdout, stderr) = self._execute('rados', 'lspools')
- pools = stdout.split("\n")
- if not CONF.rbd_pool in pools:
- exception_message = (_("rbd has no pool %s") %
- CONF.rbd_pool)
- raise exception.VolumeBackendAPIException(data=exception_message)
-
- def _supports_layering(self):
- stdout, _ = self._execute('rbd', '--help')
- return 'clone' in stdout
-
- def create_volume(self, volume):
- """Creates a logical volume."""
- if int(volume['size']) == 0:
- size = 100
- else:
- size = int(volume['size']) * 1024
- args = ['rbd', 'create',
- '--pool', CONF.rbd_pool,
- '--size', size,
- volume['name']]
- if self._supports_layering():
- args += ['--new-format']
- self._try_execute(*args)
-
- def _clone(self, volume, src_pool, src_image, src_snap):
- self._try_execute('rbd', 'clone',
- '--pool', src_pool,
- '--image', src_image,
- '--snap', src_snap,
- '--dest-pool', CONF.rbd_pool,
- '--dest', volume['name'])
-
- def _resize(self, volume):
- size = int(volume['size']) * 1024
- self._try_execute('rbd', 'resize',
- '--pool', CONF.rbd_pool,
- '--image', volume['name'],
- '--size', size)
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
- self._clone(volume, CONF.rbd_pool,
- snapshot['volume_name'], snapshot['name'])
- if int(volume['size']):
- self._resize(volume)
-
- def delete_volume(self, volume):
- """Deletes a logical volume."""
- stdout, _ = self._execute('rbd', 'snap', 'ls',
- '--pool', CONF.rbd_pool,
- volume['name'])
- if stdout.count('\n') > 1:
- raise exception.VolumeIsBusy(volume_name=volume['name'])
- self._try_execute('rbd', 'rm',
- '--pool', CONF.rbd_pool,
- volume['name'])
-
- def create_snapshot(self, snapshot):
- """Creates an rbd snapshot"""
- self._try_execute('rbd', 'snap', 'create',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
- if self._supports_layering():
- self._try_execute('rbd', 'snap', 'protect',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
-
- def delete_snapshot(self, snapshot):
- """Deletes an rbd snapshot"""
- if self._supports_layering():
- try:
- self._try_execute('rbd', 'snap', 'unprotect',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
- except exception.ProcessExecutionError:
- raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
- self._try_execute('rbd', 'snap', 'rm',
- '--pool', CONF.rbd_pool,
- '--snap', snapshot['name'],
- snapshot['volume_name'])
-
- def local_path(self, volume):
- """Returns the path of the rbd volume."""
- # This is the same as the remote path
- # since qemu accesses it directly.
- return "rbd:%s/%s" % (CONF.rbd_pool, volume['name'])
-
- def ensure_export(self, context, volume):
- """Synchronously recreates an export for a logical volume."""
- pass
-
- def create_export(self, context, volume):
- """Exports the volume"""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'rbd',
- 'data': {
- 'name': '%s/%s' % (CONF.rbd_pool, volume['name']),
- 'auth_enabled': CONF.rbd_secret_uuid is not None,
- 'auth_username': CONF.rbd_user,
- 'secret_type': 'ceph',
- 'secret_uuid': CONF.rbd_secret_uuid,
- }
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
- def _parse_location(self, location):
- prefix = 'rbd://'
- if not location.startswith(prefix):
- reason = _('Image %s is not stored in rbd') % location
- raise exception.ImageUnacceptable(reason)
- pieces = map(urllib.unquote, location[len(prefix):].split('/'))
- if any(map(lambda p: p == '', pieces)):
- reason = _('Image %s has blank components') % location
- raise exception.ImageUnacceptable(reason)
- if len(pieces) != 4:
- reason = _('Image %s is not an rbd snapshot') % location
- raise exception.ImageUnacceptable(reason)
- return pieces
-
- def _get_fsid(self):
- stdout, _ = self._execute('ceph', 'fsid')
- return stdout.rstrip('\n')
-
- def _is_cloneable(self, image_location):
- try:
- fsid, pool, image, snapshot = self._parse_location(image_location)
- except exception.ImageUnacceptable:
- return False
-
- if self._get_fsid() != fsid:
- reason = _('%s is in a different ceph cluster') % image_location
- LOG.debug(reason)
- return False
-
- # check that we can read the image
- try:
- self._execute('rbd', 'info',
- '--pool', pool,
- '--image', image,
- '--snap', snapshot)
- except exception.ProcessExecutionError:
- LOG.debug(_('Unable to read image %s') % image_location)
- return False
-
- return True
-
- def clone_image(self, volume, image_location):
- if image_location is None or not self._is_cloneable(image_location):
- return False
- _, pool, image, snapshot = self._parse_location(image_location)
- self._clone(volume, pool, image, snapshot)
- self._resize(volume)
- return True
-
- def copy_image_to_volume(self, context, volume, image_service, image_id):
- # TODO(jdurgin): replace with librbd
- # this is a temporary hack, since rewriting this driver
- # to use librbd would take too long
- if CONF.volume_tmp_dir and not os.exists(CONF.volume_tmp_dir):
- os.makedirs(CONF.volume_tmp_dir)
-
- with tempfile.NamedTemporaryFile(dir=CONF.volume_tmp_dir) as tmp:
- image_service.download(context, image_id, tmp)
- # import creates the image, so we must remove it first
- self._try_execute('rbd', 'rm',
- '--pool', CONF.rbd_pool,
- volume['name'])
- self._try_execute('rbd', 'import',
- '--pool', CONF.rbd_pool,
- tmp.name, volume['name'])
-
-
-class SheepdogDriver(VolumeDriver):
- """Executes commands relating to Sheepdog Volumes"""
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met"""
- try:
- #NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
- # gives short output, but for compatibility reason we won't
- # use it and just check if 'running' is in the output.
- (out, err) = self._execute('collie', 'cluster', 'info')
- if not 'running' in out.split():
- exception_message = _("Sheepdog is not working: %s") % out
- raise exception.VolumeBackendAPIException(
- data=exception_message)
-
- except exception.ProcessExecutionError:
- exception_message = _("Sheepdog is not working")
- raise exception.NovaException(data=exception_message)
-
- def create_volume(self, volume):
- """Creates a sheepdog volume"""
- self._try_execute('qemu-img', 'create',
- "sheepdog:%s" % volume['name'],
- self._sizestr(volume['size']))
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a sheepdog volume from a snapshot."""
- self._try_execute('qemu-img', 'create', '-b',
- "sheepdog:%s:%s" % (snapshot['volume_name'],
- snapshot['name']),
- "sheepdog:%s" % volume['name'])
-
- def delete_volume(self, volume):
- """Deletes a logical volume"""
- self._try_execute('collie', 'vdi', 'delete', volume['name'])
-
- def create_snapshot(self, snapshot):
- """Creates a sheepdog snapshot"""
- self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
- "sheepdog:%s" % snapshot['volume_name'])
-
- def delete_snapshot(self, snapshot):
- """Deletes a sheepdog snapshot"""
- self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
- '-s', snapshot['name'])
-
- def local_path(self, volume):
- return "sheepdog:%s" % volume['name']
-
- def ensure_export(self, context, volume):
- """Safely and synchronously recreates an export for a logical volume"""
- pass
-
- def create_export(self, context, volume):
- """Exports the volume"""
- pass
-
- def remove_export(self, context, volume):
- """Removes an export for a logical volume"""
- pass
-
- def check_for_export(self, context, volume_id):
- """Make sure volume is exported."""
- pass
-
- def initialize_connection(self, volume, connector):
- return {
- 'driver_volume_type': 'sheepdog',
- 'data': {
- 'name': volume['name']
- }
- }
-
- def terminate_connection(self, volume, connector):
- pass
-
-
-class LoggingVolumeDriver(VolumeDriver):
- """Logs and records calls, for unit tests."""
-
- def check_for_setup_error(self):
- pass
-
- def create_volume(self, volume):
- self.log_action('create_volume', volume)
-
- def delete_volume(self, volume):
- self.log_action('delete_volume', volume)
-
- def local_path(self, volume):
- print "local_path not implemented"
- raise NotImplementedError()
-
- def ensure_export(self, context, volume):
- self.log_action('ensure_export', volume)
-
- def create_export(self, context, volume):
- self.log_action('create_export', volume)
-
- def remove_export(self, context, volume):
- self.log_action('remove_export', volume)
-
- def initialize_connection(self, volume, connector):
- self.log_action('initialize_connection', volume)
-
- def terminate_connection(self, volume, connector):
- self.log_action('terminate_connection', volume)
-
- def check_for_export(self, context, volume_id):
- self.log_action('check_for_export', volume_id)
-
- _LOGS = []
-
- @staticmethod
- def clear_logs():
- LoggingVolumeDriver._LOGS = []
-
- @staticmethod
- def log_action(action, parameters):
- """Logs the command."""
- LOG.debug(_("LoggingVolumeDriver: %s") % (action))
- log_dictionary = {}
- if parameters:
- log_dictionary = dict(parameters)
- log_dictionary['action'] = action
- LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
- LoggingVolumeDriver._LOGS.append(log_dictionary)
-
- @staticmethod
- def all_logs():
- return LoggingVolumeDriver._LOGS
-
- @staticmethod
- def logs_like(action, **kwargs):
- matches = []
- for entry in LoggingVolumeDriver._LOGS:
- if entry['action'] != action:
- continue
- match = True
- for k, v in kwargs.iteritems():
- if entry.get(k) != v:
- match = False
- break
- if match:
- matches.append(entry)
- return matches
-
-
-def _iscsi_location(ip, target, iqn, lun=None):
- return "%s:%s,%s %s %s" % (ip, CONF.iscsi_port, target, iqn, lun)
diff --git a/nova/volume/iscsi.py b/nova/volume/iscsi.py
deleted file mode 100644
index ce2776920..000000000
--- a/nova/volume/iscsi.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Helper code for the iSCSI volume driver.
-
-"""
-import os
-
-from nova import config
-from nova import exception
-from nova import flags
-from nova.openstack.common import cfg
-from nova.openstack.common import fileutils
-from nova.openstack.common import log as logging
-from nova import utils
-
-LOG = logging.getLogger(__name__)
-
-iscsi_helper_opt = [
- cfg.StrOpt('iscsi_helper',
- default='tgtadm',
- help='iscsi target user-land tool to use'),
- cfg.StrOpt('volumes_dir',
- default='$state_path/volumes',
- help='Volume configuration file storage directory'),
-]
-
-CONF = config.CONF
-CONF.register_opts(iscsi_helper_opt)
-
-
-class TargetAdmin(object):
- """iSCSI target administration.
-
- Base class for iSCSI target admin helpers.
- """
-
- def __init__(self, cmd, execute):
- self._cmd = cmd
- self.set_execute(execute)
-
- def set_execute(self, execute):
- """Set the function to be used to execute commands."""
- self._execute = execute
-
- def _run(self, *args, **kwargs):
- self._execute(self._cmd, *args, run_as_root=True, **kwargs)
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- """Create a iSCSI target and logical unit"""
- raise NotImplementedError()
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- """Remove a iSCSI target and logical unit"""
- raise NotImplementedError()
-
- def _new_target(self, name, tid, **kwargs):
- """Create a new iSCSI target."""
- raise NotImplementedError()
-
- def _delete_target(self, tid, **kwargs):
- """Delete a target."""
- raise NotImplementedError()
-
- def show_target(self, tid, iqn=None, **kwargs):
- """Query the given target ID."""
- raise NotImplementedError()
-
- def _new_logicalunit(self, tid, lun, path, **kwargs):
- """Create a new LUN on a target using the supplied path."""
- raise NotImplementedError()
-
- def _delete_logicalunit(self, tid, lun, **kwargs):
- """Delete a logical unit from a target."""
- raise NotImplementedError()
-
-
-class TgtAdm(TargetAdmin):
- """iSCSI target administration using tgtadm."""
-
- def __init__(self, execute=utils.execute):
- super(TgtAdm, self).__init__('tgtadm', execute)
-
- def _get_target(self, iqn):
- (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
- lines = out.split('\n')
- for line in lines:
- if iqn in line:
- parsed = line.split()
- tid = parsed[1]
- return tid[:-1]
-
- return None
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- # Note(jdg) tid and lun aren't used by TgtAdm but remain for
- # compatibility
-
- fileutils.ensure_tree(CONF.volumes_dir)
-
- vol_id = name.split(':')[1]
- volume_conf = """
- <target %s>
- backing-store %s
- </target>
- """ % (name, path)
-
- LOG.info(_('Creating volume: %s') % vol_id)
- volumes_dir = CONF.volumes_dir
- volume_path = os.path.join(volumes_dir, vol_id)
-
- f = open(volume_path, 'w+')
- f.write(volume_conf)
- f.close()
-
- try:
- (out, err) = self._execute('tgt-admin',
- '--update',
- name,
- run_as_root=True)
- except exception.ProcessExecutionError, e:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(vol_id)s.") % locals())
-
- #Don't forget to remove the persistent file we created
- os.unlink(volume_path)
- raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
-
- iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
- tid = self._get_target(iqn)
- if tid is None:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(vol_id)s. Please ensure your tgtd config file "
- "contains 'include %(volumes_dir)s/*'") % locals())
- raise exception.NotFound()
-
- return tid
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
- vol_uuid_file = 'volume-%s' % vol_id
- volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
- if os.path.isfile(volume_path):
- iqn = '%s%s' % (CONF.iscsi_target_prefix,
- vol_uuid_file)
- else:
- raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
- try:
- self._execute('tgt-admin',
- '--delete',
- iqn,
- run_as_root=True)
- except exception.ProcessExecutionError, e:
- LOG.error(_("Failed to create iscsi target for volume "
- "id:%(volume_id)s.") % locals())
- raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
-
- os.unlink(volume_path)
-
- def show_target(self, tid, iqn=None, **kwargs):
- if iqn is None:
- raise exception.InvalidParameterValue(
- err=_('valid iqn needed for show_target'))
-
- tid = self._get_target(iqn)
- if tid is None:
- raise exception.NotFound()
-
-
-class IetAdm(TargetAdmin):
- """iSCSI target administration using ietadm."""
-
- def __init__(self, execute=utils.execute):
- super(IetAdm, self).__init__('ietadm', execute)
-
- def create_iscsi_target(self, name, tid, lun, path, **kwargs):
- self._new_target(name, tid, **kwargs)
- self._new_logicalunit(tid, lun, path, **kwargs)
- return tid
-
- def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
- self._delete_logicalunit(tid, lun, **kwargs)
- self._delete_target(tid, **kwargs)
-
- def _new_target(self, name, tid, **kwargs):
- self._run('--op', 'new',
- '--tid=%s' % tid,
- '--params', 'Name=%s' % name,
- **kwargs)
-
- def _delete_target(self, tid, **kwargs):
- self._run('--op', 'delete',
- '--tid=%s' % tid,
- **kwargs)
-
- def show_target(self, tid, iqn=None, **kwargs):
- self._run('--op', 'show',
- '--tid=%s' % tid,
- **kwargs)
-
- def _new_logicalunit(self, tid, lun, path, **kwargs):
- self._run('--op', 'new',
- '--tid=%s' % tid,
- '--lun=%d' % lun,
- '--params', 'Path=%s,Type=fileio' % path,
- **kwargs)
-
- def _delete_logicalunit(self, tid, lun, **kwargs):
- self._run('--op', 'delete',
- '--tid=%s' % tid,
- '--lun=%d' % lun,
- **kwargs)
-
-
-def get_target_admin():
- if CONF.iscsi_helper == 'tgtadm':
- return TgtAdm()
- else:
- return IetAdm()
diff --git a/nova/weights.py b/nova/weights.py
new file mode 100644
index 000000000..981171b3e
--- /dev/null
+++ b/nova/weights.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2011-2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Pluggable Weighing support
+"""
+
+from nova import loadables
+
+
+class WeighedObject(object):
+ """Object with weight information."""
+ def __init__(self, obj, weight):
+ self.obj = obj
+ self.weight = weight
+
+ def __repr__(self):
+ return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
+
+
+class BaseWeigher(object):
+ """Base class for pluggable weighers."""
+ def _weight_multiplier(self):
+ """How weighted this weigher should be. Normally this would
+ be overriden in a subclass based on a config value.
+ """
+ return 1.0
+
+ def _weigh_object(self, obj, weight_properties):
+ """Override in a subclass to specify a weight for a specific
+ object.
+ """
+ return 0.0
+
+ def weigh_objects(self, weighed_obj_list, weight_properties):
+ """Weigh multiple objects. Override in a subclass if you need
+ need access to all objects in order to manipulate weights.
+ """
+ for obj in weighed_obj_list:
+ obj.weight += (self._weight_multiplier() *
+ self._weigh_object(obj.obj, weight_properties))
+
+
+class BaseWeightHandler(loadables.BaseLoader):
+ object_class = WeighedObject
+
+ def get_weighed_objects(self, weigher_classes, obj_list,
+ weighing_properties):
+ """Return a sorted (highest score first) list of WeighedObjects."""
+
+ if not obj_list:
+ return []
+
+ weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
+ for weigher_cls in weigher_classes:
+ weigher = weigher_cls()
+ weigher.weigh_objects(weighed_objs, weighing_properties)
+
+ return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index b8880dfde..18c28b87b 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -32,7 +32,6 @@ import webob.exc
from nova import config
from nova import exception
-from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
diff --git a/openstack-common.conf b/openstack-common.conf
index 666fb059e..5f64e6ee2 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,context,excutils,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
+modules=cfg,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py
index d6491c9d4..b05f0ac4b 100644
--- a/smoketests/test_sysadmin.py
+++ b/smoketests/test_sysadmin.py
@@ -249,12 +249,24 @@ class VolumeTests(base.UserSmokeTestCase):
self.assertTrue(volume.status.startswith('in-use'))
- # Give instance time to recognize volume.
- time.sleep(5)
-
def test_003_can_mount_volume(self):
ip = self.data['instance'].private_ip_address
conn = self.connect_ssh(ip, TEST_KEY)
+
+ # NOTE(dprince): give some time for volume to show up in partitions
+ stdin, stdout, stderr = conn.exec_command(
+ 'COUNT="0";'
+ 'until cat /proc/partitions | grep "%s\\$"; do '
+ '[ "$COUNT" -eq "60" ] && exit 1;'
+ 'COUNT=$(( $COUNT + 1 ));'
+ 'sleep 1; '
+ 'done'
+ % self.device.rpartition('/')[2])
+ out = stdout.read()
+ if not out.strip().endswith(self.device.rpartition('/')[2]):
+ self.fail('Timeout waiting for volume partition in instance. %s %s'
+ % (out, stderr.read()))
+
# NOTE(vish): this will create a dev for images that don't have
# udev rules
stdin, stdout, stderr = conn.exec_command(
diff --git a/tools/hacking.py b/tools/hacking.py
index a22e1c6ff..91d8fde60 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -271,6 +271,13 @@ def nova_import_alphabetical(logical_line, line_number, lines):
% (split_previous[1], split_line[1]))
+def nova_import_no_db_in_virt(logical_line, filename):
+ if ("nova/virt" in filename and
+ not filename.endswith("fake.py") and
+ "nova import db" in logical_line):
+ yield (0, "NOVA N307: nova.db import not allowed in nova/virt/*")
+
+
def nova_docstring_start_space(physical_line):
"""Check for docstring not start with space.
diff --git a/tools/test-requires b/tools/test-requires
index fc56d3c87..b3d4b5a22 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -2,6 +2,7 @@
distribute>=0.6.24
coverage
+fixtures
mox==0.5.3
nose
testtools
diff --git a/tools/xenserver/destroy_cached_images.py b/tools/xenserver/destroy_cached_images.py
index 01d68b4f6..88f24f4ef 100644
--- a/tools/xenserver/destroy_cached_images.py
+++ b/tools/xenserver/destroy_cached_images.py
@@ -26,7 +26,7 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from nova import config
-from nova import flags
+from nova import config
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index 10f9c1ffe..89fc454b1 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -31,7 +31,6 @@ from nova import config
from nova import context
from nova import db
from nova import exception
-from nova import flags
from nova.openstack.common import timeutils
from nova.virt.xenapi import driver as xenapi_driver
diff --git a/tox.ini b/tox.ini
index a40b1900a..55ac0cd43 100644
--- a/tox.ini
+++ b/tox.ini
@@ -26,11 +26,13 @@ deps=pep8==1.2
commands =
python tools/hacking.py --ignore=N4,E12,E711,E721 --repeat --show-source \
--exclude=.venv,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
+ python tools/hacking.py --ignore=N4,E12,E711,E721 --repeat --show-source \
+ --filename=nova* bin
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/tools/pip-requires
- pylint==0.25.2
+ pylint==0.26.0
commands = bash tools/lintstack.sh
[testenv:cover]